2024-11-22 13:33:51,505 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-22 13:33:51,516 main DEBUG Took 0.009600 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-22 13:33:51,517 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-22 13:33:51,517 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-22 13:33:51,518 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-22 13:33:51,519 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,526 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-22 13:33:51,538 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,539 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,540 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,540 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,541 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,541 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,542 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,542 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,543 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,543 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,544 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,544 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,544 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,545 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-22 13:33:51,545 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,545 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,546 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,546 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,547 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,547 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,547 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,548 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,548 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,548 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 13:33:51,549 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,549 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-22 13:33:51,550 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 13:33:51,552 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-22 13:33:51,553 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-22 13:33:51,554 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-22 13:33:51,555 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-22 13:33:51,555 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-22 13:33:51,562 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-22 13:33:51,564 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-22 13:33:51,566 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-22 13:33:51,566 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-22 13:33:51,567 main DEBUG createAppenders(={Console}) 2024-11-22 13:33:51,567 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-22 13:33:51,568 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-22 13:33:51,568 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-22 13:33:51,568 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-22 13:33:51,569 main DEBUG OutputStream closed 2024-11-22 13:33:51,569 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-22 13:33:51,569 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-22 13:33:51,569 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-22 13:33:51,635 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-22 13:33:51,638 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-22 13:33:51,639 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-22 13:33:51,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-22 13:33:51,640 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-22 13:33:51,641 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-22 13:33:51,641 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-22 13:33:51,641 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-22 13:33:51,642 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-22 13:33:51,642 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-22 13:33:51,642 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-22 13:33:51,643 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-22 13:33:51,643 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-22 13:33:51,643 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-22 13:33:51,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-22 13:33:51,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-22 13:33:51,644 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-22 13:33:51,645 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-22 13:33:51,647 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-22 13:33:51,648 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-22 13:33:51,648 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-22 13:33:51,649 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-22T13:33:51,870 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0 2024-11-22 13:33:51,873 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-22 13:33:51,874 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
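The DEBUG output above records the Log4j2 test configuration being assembled from the bundled log4j2.properties: a PatternLayout with pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, a Console appender targeting SYSTEM_ERR, a root logger at INFO routed to that appender, and per-package logger levels (for example org.apache.zookeeper=ERROR, org.apache.hadoop=WARN, org.apache.hadoop.hbase=DEBUG). The sketch below is not HBase's actual properties file; it is a hedged, roughly equivalent programmatic reconstruction using the standard Log4j2 ConfigurationBuilder API, with a stock Console appender standing in for the custom HBaseTestAppender seen in the log.

```java
// Hedged sketch: rebuilds a configuration roughly equivalent to the one the
// DEBUG output above describes, using the public Log4j2 builder API. A stock
// Console appender replaces the custom HBaseTestAppender used by the tests.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public class TestLoggingConfigSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> builder =
        ConfigurationBuilderFactory.newConfigurationBuilder();

    // Console appender on SYSTEM_ERR with the pattern recorded in the log.
    builder.add(builder.newAppender("Console", "Console")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
        .add(builder.newLayout("PatternLayout")
            .addAttribute("pattern",
                "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));

    // A few of the per-package levels listed in the createLoggers(...) entry.
    builder.add(builder.newLogger("org.apache.zookeeper", Level.ERROR));
    builder.add(builder.newLogger("org.apache.hadoop", Level.WARN));
    builder.add(builder.newLogger("org.apache.hadoop.hbase", Level.DEBUG));

    // Root logger at INFO, routed to the Console appender.
    builder.add(builder.newRootLogger(Level.INFO)
        .add(builder.newAppenderRef("Console")));

    Configurator.initialize(builder.build());
  }
}
```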
2024-11-22T13:33:51,882 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-22T13:33:51,919 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=161, ProcessCount=11, AvailableMemoryMB=3587 2024-11-22T13:33:51,922 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T13:33:51,941 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4, deleteOnExit=true 2024-11-22T13:33:51,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T13:33:51,943 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/test.cache.data in system properties and HBase conf 2024-11-22T13:33:51,944 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T13:33:51,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.log.dir in system properties and HBase conf 2024-11-22T13:33:51,946 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T13:33:51,946 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T13:33:51,947 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T13:33:52,039 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-22T13:33:52,123 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T13:33:52,126 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:33:52,127 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:33:52,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T13:33:52,128 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:33:52,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T13:33:52,129 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T13:33:52,130 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:33:52,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:33:52,131 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T13:33:52,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/nfs.dump.dir in system properties and HBase conf 2024-11-22T13:33:52,132 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/java.io.tmpdir in system properties and HBase conf 2024-11-22T13:33:52,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:33:52,133 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T13:33:52,134 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T13:33:52,587 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:33:53,172 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-22T13:33:53,244 INFO [Time-limited test {}] log.Log(170): Logging initialized @2423ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-22T13:33:53,313 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:33:53,370 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:33:53,389 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:33:53,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:33:53,391 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:33:53,402 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:33:53,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:33:53,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:33:53,586 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/java.io.tmpdir/jetty-localhost-33575-hadoop-hdfs-3_4_1-tests_jar-_-any-1855125999839809798/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:33:53,592 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:33575} 2024-11-22T13:33:53,592 INFO [Time-limited test {}] server.Server(415): Started @2772ms 2024-11-22T13:33:53,620 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:33:54,151 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:33:54,159 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:33:54,160 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:33:54,160 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:33:54,160 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:33:54,163 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:33:54,164 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:33:54,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c2fdbac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/java.io.tmpdir/jetty-localhost-40495-hadoop-hdfs-3_4_1-tests_jar-_-any-3790296917051848070/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:33:54,265 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:40495} 2024-11-22T13:33:54,266 INFO [Time-limited test {}] server.Server(415): Started @3445ms 2024-11-22T13:33:54,318 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:33:54,428 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:33:54,436 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:33:54,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:33:54,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:33:54,438 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:33:54,440 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:33:54,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:33:54,540 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1467625d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/java.io.tmpdir/jetty-localhost-41709-hadoop-hdfs-3_4_1-tests_jar-_-any-6845239727048889691/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:33:54,541 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:41709} 2024-11-22T13:33:54,541 INFO [Time-limited test {}] server.Server(415): Started @3721ms 2024-11-22T13:33:54,544 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
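The entries from 13:33:51,919 onward record TestLogRolling bringing up a single-master, single-regionserver mini cluster with two datanodes and one ZooKeeper server (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}), then starting mini-DFS and its Jetty endpoints. For orientation, the sketch below shows how a test typically drives that startup; it is an assumption-laden sketch against the HBase 3.x test utilities (HBaseTestingUtil, StartMiniClusterOption), not the actual TestLogRolling source.

```java
// Hedged sketch (not the actual TestLogRolling code): how a test typically
// starts and stops the kind of mini cluster recorded in the log above,
// assuming the HBase 3.x test utilities HBaseTestingUtil and
// StartMiniClusterOption.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the options printed in the log:
    // numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();

    // Starts mini-DFS, mini-ZooKeeper, the master, and the region server,
    // which is the sequence the surrounding log entries record.
    util.startMiniCluster(option);
    try {
      // ... test body (e.g. WAL rolling assertions) would run here ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```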
2024-11-22T13:33:55,593 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/data/data1/current/BP-1289964154-172.17.0.2-1732282432667/current, will proceed with Du for space computation calculation, 2024-11-22T13:33:55,593 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/data/data2/current/BP-1289964154-172.17.0.2-1732282432667/current, will proceed with Du for space computation calculation, 2024-11-22T13:33:55,593 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/data/data3/current/BP-1289964154-172.17.0.2-1732282432667/current, will proceed with Du for space computation calculation, 2024-11-22T13:33:55,593 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/data/data4/current/BP-1289964154-172.17.0.2-1732282432667/current, will proceed with Du for space computation calculation, 2024-11-22T13:33:55,625 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T13:33:55,625 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:33:55,667 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0 2024-11-22T13:33:55,670 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd447f2fddf096efa with lease ID 0x7cb1dcfd6dee0ecb: Processing first storage report for DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3 from datanode DatanodeRegistration(127.0.0.1:42491, datanodeUuid=819c0b05-e0fe-445e-b118-1dcead9b721b, infoPort=38207, infoSecurePort=0, ipcPort=36537, storageInfo=lv=-57;cid=testClusterID;nsid=1272123649;c=1732282432667) 2024-11-22T13:33:55,672 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd447f2fddf096efa with lease ID 0x7cb1dcfd6dee0ecb: from storage DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3 node DatanodeRegistration(127.0.0.1:42491, datanodeUuid=819c0b05-e0fe-445e-b118-1dcead9b721b, infoPort=38207, infoSecurePort=0, ipcPort=36537, storageInfo=lv=-57;cid=testClusterID;nsid=1272123649;c=1732282432667), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T13:33:55,672 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1a932d0f98fc18bc with lease ID 0x7cb1dcfd6dee0eca: Processing first storage report for DS-77e78628-9332-4b63-a99f-3ec4777b621f from datanode DatanodeRegistration(127.0.0.1:42593, datanodeUuid=4b0861cf-737c-45f8-ab27-73d17d439b35, infoPort=42009, infoSecurePort=0, ipcPort=34283, storageInfo=lv=-57;cid=testClusterID;nsid=1272123649;c=1732282432667) 2024-11-22T13:33:55,672 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a932d0f98fc18bc with lease ID 0x7cb1dcfd6dee0eca: from storage DS-77e78628-9332-4b63-a99f-3ec4777b621f node DatanodeRegistration(127.0.0.1:42593, datanodeUuid=4b0861cf-737c-45f8-ab27-73d17d439b35, infoPort=42009, infoSecurePort=0, ipcPort=34283, storageInfo=lv=-57;cid=testClusterID;nsid=1272123649;c=1732282432667), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:33:55,672 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd447f2fddf096efa with lease ID 0x7cb1dcfd6dee0ecb: Processing first storage report for DS-a1946033-03ea-4fe3-aafe-13fa9f957998 from datanode DatanodeRegistration(127.0.0.1:42491, datanodeUuid=819c0b05-e0fe-445e-b118-1dcead9b721b, infoPort=38207, infoSecurePort=0, ipcPort=36537, storageInfo=lv=-57;cid=testClusterID;nsid=1272123649;c=1732282432667) 2024-11-22T13:33:55,673 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd447f2fddf096efa with lease ID 0x7cb1dcfd6dee0ecb: from storage DS-a1946033-03ea-4fe3-aafe-13fa9f957998 node DatanodeRegistration(127.0.0.1:42491, datanodeUuid=819c0b05-e0fe-445e-b118-1dcead9b721b, infoPort=38207, infoSecurePort=0, ipcPort=36537, storageInfo=lv=-57;cid=testClusterID;nsid=1272123649;c=1732282432667), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:33:55,673 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1a932d0f98fc18bc with lease ID 0x7cb1dcfd6dee0eca: Processing first storage report for DS-9bf8486f-f47a-4466-a8a9-40b93a8e8195 from datanode DatanodeRegistration(127.0.0.1:42593, datanodeUuid=4b0861cf-737c-45f8-ab27-73d17d439b35, 
infoPort=42009, infoSecurePort=0, ipcPort=34283, storageInfo=lv=-57;cid=testClusterID;nsid=1272123649;c=1732282432667) 2024-11-22T13:33:55,673 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1a932d0f98fc18bc with lease ID 0x7cb1dcfd6dee0eca: from storage DS-9bf8486f-f47a-4466-a8a9-40b93a8e8195 node DatanodeRegistration(127.0.0.1:42593, datanodeUuid=4b0861cf-737c-45f8-ab27-73d17d439b35, infoPort=42009, infoSecurePort=0, ipcPort=34283, storageInfo=lv=-57;cid=testClusterID;nsid=1272123649;c=1732282432667), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:33:55,742 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/zookeeper_0, clientPort=53314, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T13:33:55,751 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53314 2024-11-22T13:33:55,762 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:33:55,764 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:33:55,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:33:55,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:33:56,367 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f with version=8 2024-11-22T13:33:56,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase-staging 2024-11-22T13:33:56,456 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-22T13:33:56,701 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:33:56,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:33:56,711 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:33:56,717 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:33:56,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:33:56,717 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:33:56,848 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T13:33:56,902 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-22T13:33:56,911 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-22T13:33:56,914 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:33:56,936 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 98281 (auto-detected) 2024-11-22T13:33:56,937 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-22T13:33:56,954 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41345 2024-11-22T13:33:56,981 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41345 connecting to ZooKeeper ensemble=127.0.0.1:53314 2024-11-22T13:33:57,101 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:413450x0, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:33:57,104 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41345-0x10162c0443a0000 connected 2024-11-22T13:33:57,214 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:33:57,218 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:33:57,228 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:33:57,232 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f, hbase.cluster.distributed=false 2024-11-22T13:33:57,254 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:33:57,259 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41345 
2024-11-22T13:33:57,259 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41345 2024-11-22T13:33:57,260 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41345 2024-11-22T13:33:57,260 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41345 2024-11-22T13:33:57,260 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41345 2024-11-22T13:33:57,360 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:33:57,362 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:33:57,362 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:33:57,363 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:33:57,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:33:57,363 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:33:57,366 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T13:33:57,368 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:33:57,369 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45803 2024-11-22T13:33:57,371 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45803 connecting to ZooKeeper ensemble=127.0.0.1:53314 2024-11-22T13:33:57,372 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:33:57,377 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:33:57,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458030x0, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:33:57,391 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:458030x0, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:33:57,391 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:45803-0x10162c0443a0001 connected 2024-11-22T13:33:57,396 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T13:33:57,407 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T13:33:57,410 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T13:33:57,416 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:33:57,417 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45803 2024-11-22T13:33:57,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45803 2024-11-22T13:33:57,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45803 2024-11-22T13:33:57,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45803 2024-11-22T13:33:57,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45803 2024-11-22T13:33:57,439 DEBUG [M:0;e025332d312f:41345 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e025332d312f:41345 2024-11-22T13:33:57,440 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e025332d312f,41345,1732282436551 2024-11-22T13:33:57,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:33:57,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:33:57,456 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e025332d312f,41345,1732282436551 2024-11-22T13:33:57,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:57,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T13:33:57,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:57,486 DEBUG 
[master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T13:33:57,487 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e025332d312f,41345,1732282436551 from backup master directory 2024-11-22T13:33:57,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e025332d312f,41345,1732282436551 2024-11-22T13:33:57,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:33:57,496 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:33:57,496 WARN [master/e025332d312f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T13:33:57,497 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e025332d312f,41345,1732282436551 2024-11-22T13:33:57,499 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-22T13:33:57,501 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-22T13:33:57,556 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase.id] with ID: 62d83833-31e1-4385-bf9d-562f4984ac94 2024-11-22T13:33:57,556 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/.tmp/hbase.id 2024-11-22T13:33:57,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:33:57,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:33:57,570 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/.tmp/hbase.id]:[hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase.id] 2024-11-22T13:33:57,614 INFO [master/e025332d312f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:33:57,620 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-22T13:33:57,638 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-22T13:33:57,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:57,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:57,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:33:57,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:33:57,687 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:33:57,689 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T13:33:57,697 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:33:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:33:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:33:57,745 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store 2024-11-22T13:33:57,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:33:57,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:33:57,769 INFO [master/e025332d312f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-22T13:33:57,773 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:33:57,775 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:33:57,775 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:33:57,775 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:33:57,777 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-11-22T13:33:57,777 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:33:57,777 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T13:33:57,778 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282437775Disabling compacts and flushes for region at 1732282437775Disabling writes for close at 1732282437777 (+2 ms)Writing region close event to WAL at 1732282437777Closed at 1732282437777 2024-11-22T13:33:57,780 WARN [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/.initializing 2024-11-22T13:33:57,780 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/WALs/e025332d312f,41345,1732282436551 2024-11-22T13:33:57,803 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C41345%2C1732282436551, suffix=, logDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/WALs/e025332d312f,41345,1732282436551, archiveDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/oldWALs, maxLogs=10 2024-11-22T13:33:57,814 INFO [master/e025332d312f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41345%2C1732282436551.1732282437809 2024-11-22T13:33:57,834 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/WALs/e025332d312f,41345,1732282436551/e025332d312f%2C41345%2C1732282436551.1732282437809 2024-11-22T13:33:57,844 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:33:57,846 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:33:57,846 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:33:57,849 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,850 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T13:33:57,909 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:57,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:33:57,912 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,916 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T13:33:57,916 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:57,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:33:57,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T13:33:57,920 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:57,921 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:33:57,921 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T13:33:57,924 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:57,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:33:57,926 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,929 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,931 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,936 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,937 DEBUG [master/e025332d312f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,941 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T13:33:57,945 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:33:57,949 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:33:57,951 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804381, jitterRate=0.022823438048362732}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T13:33:57,958 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732282437862Initializing all the Stores at 1732282437864 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282437865 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282437865Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282437866 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282437866Cleaning up temporary data from old regions at 1732282437937 (+71 ms)Region opened successfully at 1732282437957 (+20 ms) 2024-11-22T13:33:57,959 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T13:33:57,989 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb1dcfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:33:58,016 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T13:33:58,025 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T13:33:58,025 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T13:33:58,028 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T13:33:58,029 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-22T13:33:58,034 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-22T13:33:58,034 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T13:33:58,059 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T13:33:58,067 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T13:33:58,117 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T13:33:58,120 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T13:33:58,123 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T13:33:58,134 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T13:33:58,137 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T13:33:58,141 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T13:33:58,148 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T13:33:58,151 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T13:33:58,159 
DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T13:33:58,181 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T13:33:58,190 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T13:33:58,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:33:58,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:33:58,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:58,201 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:58,205 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e025332d312f,41345,1732282436551, sessionid=0x10162c0443a0000, setting cluster-up flag (Was=false) 2024-11-22T13:33:58,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:58,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:58,275 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T13:33:58,280 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,41345,1732282436551 2024-11-22T13:33:58,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:58,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:58,338 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T13:33:58,340 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,41345,1732282436551 2024-11-22T13:33:58,348 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T13:33:58,410 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T13:33:58,419 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T13:33:58,424 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T13:33:58,427 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(746): ClusterId : 62d83833-31e1-4385-bf9d-562f4984ac94 2024-11-22T13:33:58,430 DEBUG [RS:0;e025332d312f:45803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T13:33:58,430 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e025332d312f,41345,1732282436551 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T13:33:58,445 DEBUG [RS:0;e025332d312f:45803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T13:33:58,445 DEBUG [RS:0;e025332d312f:45803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T13:33:58,445 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:33:58,445 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:33:58,445 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:33:58,446 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:33:58,446 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e025332d312f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T13:33:58,446 DEBUG 
[master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,446 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:33:58,446 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,447 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732282468447 2024-11-22T13:33:58,449 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T13:33:58,449 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T13:33:58,451 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:33:58,451 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T13:33:58,452 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T13:33:58,453 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T13:33:58,453 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T13:33:58,453 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T13:33:58,455 DEBUG [RS:0;e025332d312f:45803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T13:33:58,455 DEBUG [RS:0;e025332d312f:45803 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23984890, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:33:58,454 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T13:33:58,456 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:58,456 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T13:33:58,458 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T13:33:58,460 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T13:33:58,460 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T13:33:58,465 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T13:33:58,466 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T13:33:58,468 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282438467,5,FailOnTimeoutGroup] 2024-11-22T13:33:58,470 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282438468,5,FailOnTimeoutGroup] 2024-11-22T13:33:58,470 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,470 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T13:33:58,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:33:58,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:33:58,471 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,472 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,473 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T13:33:58,474 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f 2024-11-22T13:33:58,474 DEBUG [RS:0;e025332d312f:45803 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e025332d312f:45803 2024-11-22T13:33:58,478 INFO [RS:0;e025332d312f:45803 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T13:33:58,478 INFO [RS:0;e025332d312f:45803 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T13:33:58,478 DEBUG [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(832): About to register with Master. 
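The master above reports that reopening regions with very high storeFileRefCount is disabled and names the opt-in property. A minimal sketch of setting that property programmatically, assuming it would normally go into hbase-site.xml before the master starts; the threshold value 3 is purely illustrative, not a recommendation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountRecoverySketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // The feature stays off while the value is <= 0, which is what the
        // master logs above; 3 is an arbitrary illustrative threshold.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }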
2024-11-22T13:33:58,481 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(2659): reportForDuty to master=e025332d312f,41345,1732282436551 with port=45803, startcode=1732282437327 2024-11-22T13:33:58,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:33:58,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:33:58,487 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:33:58,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:33:58,492 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:33:58,492 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:58,493 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:33:58,494 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:33:58,494 DEBUG [RS:0;e025332d312f:45803 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T13:33:58,496 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName 
ns 2024-11-22T13:33:58,497 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:58,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:33:58,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:33:58,500 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:33:58,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:58,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:33:58,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:33:58,504 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:33:58,505 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:58,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:33:58,506 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:33:58,507 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740 2024-11-22T13:33:58,508 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740 2024-11-22T13:33:58,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:33:58,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:33:58,512 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:33:58,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:33:58,518 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:33:58,519 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848986, jitterRate=0.07954218983650208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:33:58,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732282438487Initializing all the Stores at 1732282438489 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282438489Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282438489Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282438489Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282438489Cleaning up temporary data from old regions 
at 1732282438511 (+22 ms)Region opened successfully at 1732282438523 (+12 ms) 2024-11-22T13:33:58,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:33:58,524 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:33:58,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:33:58,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:33:58,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:33:58,526 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:33:58,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282438524Disabling compacts and flushes for region at 1732282438524Disabling writes for close at 1732282438524Writing region close event to WAL at 1732282438526 (+2 ms)Closed at 1732282438526 2024-11-22T13:33:58,530 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:33:58,530 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T13:33:58,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T13:33:58,544 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:33:58,546 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T13:33:58,563 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38351, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T13:33:58,568 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41345 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e025332d312f,45803,1732282437327 2024-11-22T13:33:58,570 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41345 {}] master.ServerManager(517): Registering regionserver=e025332d312f,45803,1732282437327 2024-11-22T13:33:58,583 DEBUG [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f 2024-11-22T13:33:58,583 DEBUG [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37741 2024-11-22T13:33:58,583 DEBUG [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T13:33:58,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, 
quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:33:58,598 DEBUG [RS:0;e025332d312f:45803 {}] zookeeper.ZKUtil(111): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e025332d312f,45803,1732282437327 2024-11-22T13:33:58,599 WARN [RS:0;e025332d312f:45803 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T13:33:58,599 INFO [RS:0;e025332d312f:45803 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:33:58,599 DEBUG [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327 2024-11-22T13:33:58,602 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e025332d312f,45803,1732282437327] 2024-11-22T13:33:58,624 INFO [RS:0;e025332d312f:45803 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T13:33:58,637 INFO [RS:0;e025332d312f:45803 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T13:33:58,641 INFO [RS:0;e025332d312f:45803 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T13:33:58,641 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,642 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T13:33:58,647 INFO [RS:0;e025332d312f:45803 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T13:33:58,649 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
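The MemStoreFlusher record above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Assuming the default fractions for hbase.regionserver.global.memstore.size (0.4 of heap) and hbase.regionserver.global.memstore.size.lower.limit (0.95 of the limit), those numbers imply a heap of roughly 2.2 GB for the test region server; a small arithmetic sketch:

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        // Assumed default fractions; the actual test configuration is not shown in the log.
        double memstoreFraction = 0.4;    // hbase.regionserver.global.memstore.size
        double lowerLimitFraction = 0.95; // hbase.regionserver.global.memstore.size.lower.limit
        double heapMb = 880.0 / memstoreFraction;               // ~2200 MB implied heap
        double globalLimitMb = heapMb * memstoreFraction;       // 880 M, as logged
        double lowMarkMb = globalLimitMb * lowerLimitFraction;  // 836 M, as logged
        System.out.printf("heap=%.0fM limit=%.0fM lowMark=%.0fM%n",
            heapMb, globalLimitMb, lowMarkMb);
      }
    }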
2024-11-22T13:33:58,649 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,649 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,649 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,649 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,649 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,650 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:33:58,650 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,650 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,650 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,650 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,650 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,650 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:33:58,650 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:33:58,651 DEBUG [RS:0;e025332d312f:45803 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:33:58,651 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,652 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,652 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,652 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-22T13:33:58,652 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,652 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45803,1732282437327-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:33:58,669 INFO [RS:0;e025332d312f:45803 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T13:33:58,671 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45803,1732282437327-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,671 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,671 INFO [RS:0;e025332d312f:45803 {}] regionserver.Replication(171): e025332d312f,45803,1732282437327 started 2024-11-22T13:33:58,687 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:58,688 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1482): Serving as e025332d312f,45803,1732282437327, RpcServer on e025332d312f/172.17.0.2:45803, sessionid=0x10162c0443a0001 2024-11-22T13:33:58,688 DEBUG [RS:0;e025332d312f:45803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T13:33:58,688 DEBUG [RS:0;e025332d312f:45803 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e025332d312f,45803,1732282437327 2024-11-22T13:33:58,689 DEBUG [RS:0;e025332d312f:45803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,45803,1732282437327' 2024-11-22T13:33:58,689 DEBUG [RS:0;e025332d312f:45803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T13:33:58,690 DEBUG [RS:0;e025332d312f:45803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T13:33:58,690 DEBUG [RS:0;e025332d312f:45803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T13:33:58,690 DEBUG [RS:0;e025332d312f:45803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T13:33:58,690 DEBUG [RS:0;e025332d312f:45803 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e025332d312f,45803,1732282437327 2024-11-22T13:33:58,690 DEBUG [RS:0;e025332d312f:45803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,45803,1732282437327' 2024-11-22T13:33:58,691 DEBUG [RS:0;e025332d312f:45803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T13:33:58,691 DEBUG [RS:0;e025332d312f:45803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T13:33:58,692 DEBUG [RS:0;e025332d312f:45803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T13:33:58,692 INFO [RS:0;e025332d312f:45803 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T13:33:58,692 INFO [RS:0;e025332d312f:45803 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-22T13:33:58,697 WARN [e025332d312f:41345 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T13:33:58,804 INFO [RS:0;e025332d312f:45803 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C45803%2C1732282437327, suffix=, logDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327, archiveDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs, maxLogs=32 2024-11-22T13:33:58,806 INFO [RS:0;e025332d312f:45803 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282438806 2024-11-22T13:33:58,815 INFO [RS:0;e025332d312f:45803 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282438806 2024-11-22T13:33:58,817 DEBUG [RS:0;e025332d312f:45803 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:33:58,950 DEBUG [e025332d312f:41345 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T13:33:58,965 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e025332d312f,45803,1732282437327 2024-11-22T13:33:58,970 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,45803,1732282437327, state=OPENING 2024-11-22T13:33:59,053 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T13:33:59,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:59,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:33:59,067 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:33:59,067 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:33:59,070 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:33:59,073 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,45803,1732282437327}] 2024-11-22T13:33:59,257 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T13:33:59,260 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42345, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T13:33:59,271 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T13:33:59,271 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:33:59,275 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C45803%2C1732282437327.meta, suffix=.meta, logDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327, archiveDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs, maxLogs=32 2024-11-22T13:33:59,277 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.meta.1732282439277.meta 2024-11-22T13:33:59,285 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.meta.1732282439277.meta 2024-11-22T13:33:59,288 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:33:59,291 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:33:59,293 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T13:33:59,295 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T13:33:59,299 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
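The two "WAL configuration" entries above report blocksize=256 MB, rollsize=128 MB and maxLogs=32 for both the region server WAL and the meta WAL. As a rough sketch only (it is an assumption that this test relies on these particular keys rather than the shipped defaults), those numbers line up with the standard WAL sizing settings, which a test can override on its Configuration before the cluster starts:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class WalSizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "blocksize=256 MB" in the WAL configuration lines above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll size is blocksize * multiplier; 0.5 gives the "rollsize=128 MB" above.
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // "maxLogs=32" above: how many WAL files may accumulate before flushes are forced.
    conf.setInt("hbase.regionserver.maxlogs", 32);
    System.out.println("blocksize=" + conf.getLong("hbase.regionserver.hlog.blocksize", -1)
        + ", multiplier=" + conf.getFloat("hbase.regionserver.logroll.multiplier", -1f)
        + ", maxLogs=" + conf.getInt("hbase.regionserver.maxlogs", -1));
  }
}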
2024-11-22T13:33:59,303 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T13:33:59,304 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:33:59,304 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T13:33:59,304 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T13:33:59,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:33:59,310 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:33:59,310 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:59,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:33:59,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:33:59,313 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:33:59,313 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:59,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:33:59,314 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:33:59,315 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:33:59,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:59,316 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:33:59,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:33:59,318 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:33:59,318 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:59,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
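Every store opened above (info, ns, rep_barrier, table) logs the same CompactionConfiguration values: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB, major period 604800000 ms, major jitter 0.5. As a hedged sketch, these correspond to the usual compaction tuning keys; the key-to-field mapping below is my reading of the log, not something the log itself states, and the snippet only echoes the effective values from a Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The defaults passed to the getters mirror the values printed in the log above.
    System.out.println("minFilesToCompact = " + conf.getInt("hbase.hstore.compaction.min", 3));
    System.out.println("maxFilesToCompact = " + conf.getInt("hbase.hstore.compaction.max", 10));
    System.out.println("ratio             = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
    System.out.println("off-peak ratio    = " + conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f));
    System.out.println("minCompactSize    = " + conf.getLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024));
    System.out.println("major period (ms) = " + conf.getLong("hbase.hregion.majorcompaction", 604800000L));
    System.out.println("major jitter      = " + conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f));
  }
}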
2024-11-22T13:33:59,319 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:33:59,321 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740 2024-11-22T13:33:59,323 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740 2024-11-22T13:33:59,326 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:33:59,326 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:33:59,327 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:33:59,329 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:33:59,331 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852397, jitterRate=0.08387984335422516}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:33:59,331 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T13:33:59,332 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732282439305Writing region info on filesystem at 1732282439305Initializing all the Stores at 1732282439307 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282439307Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282439307Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282439307Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282439307Cleaning up temporary data from old regions at 1732282439326 (+19 ms)Running coprocessor post-open hooks at 1732282439331 (+5 ms)Region opened successfully at 1732282439332 (+1 ms) 2024-11-22T13:33:59,338 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732282439248 2024-11-22T13:33:59,349 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T13:33:59,349 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T13:33:59,351 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,45803,1732282437327 2024-11-22T13:33:59,353 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,45803,1732282437327, state=OPEN 2024-11-22T13:33:59,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:33:59,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:33:59,475 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:33:59,475 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:33:59,476 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e025332d312f,45803,1732282437327 2024-11-22T13:33:59,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T13:33:59,482 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,45803,1732282437327 in 404 msec 2024-11-22T13:33:59,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T13:33:59,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 948 msec 2024-11-22T13:33:59,490 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:33:59,490 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T13:33:59,508 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:33:59,509 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,45803,1732282437327, seqNum=-1] 2024-11-22T13:33:59,525 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:33:59,527 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58043, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:33:59,546 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1700 sec 2024-11-22T13:33:59,547 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732282439546, completionTime=-1 2024-11-22T13:33:59,549 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T13:33:59,550 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T13:33:59,580 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T13:33:59,581 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732282499581 2024-11-22T13:33:59,581 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732282559581 2024-11-22T13:33:59,581 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 30 msec 2024-11-22T13:33:59,584 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41345,1732282436551-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:59,584 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41345,1732282436551-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:59,584 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41345,1732282436551-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:59,586 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e025332d312f:41345, period=300000, unit=MILLISECONDS is enabled. 
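All of the "Chore ScheduledChore name=..., period=..., unit=... is enabled" entries, here for the master and earlier for the region server, come from ChoreService scheduling periodic background tasks. The following is only an illustrative sketch of that mechanism with a custom chore; the constructor arguments and the minimal Stoppable wiring are my assumptions about the simplest use of these internal classes, not code taken from this test:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public final class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled cooperatively.
    final boolean[] stopped = { false };
    Stoppable stopper = new Stoppable() {
      @Override public void stop(String why) { stopped[0] = true; }
      @Override public boolean isStopped() { return stopped[0]; }
    };

    // A task that runs every 5 seconds; scheduling it produces an
    // "is enabled" log line like the ones above.
    ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 5, 0, TimeUnit.SECONDS) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };

    ChoreService service = new ChoreService("demo");
    service.scheduleChore(chore);
    Thread.sleep(12_000);
    stopper.stop("done");
    service.shutdown();
  }
}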
2024-11-22T13:33:59,586 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:59,587 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T13:33:59,592 DEBUG [master/e025332d312f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T13:33:59,612 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.115sec 2024-11-22T13:33:59,613 INFO [master/e025332d312f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T13:33:59,615 INFO [master/e025332d312f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T13:33:59,616 INFO [master/e025332d312f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T13:33:59,617 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T13:33:59,617 INFO [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T13:33:59,618 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41345,1732282436551-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:33:59,618 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41345,1732282436551-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T13:33:59,626 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T13:33:59,627 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T13:33:59,628 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41345,1732282436551-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
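At this point the master reports that initialization is complete, and just below HBaseTestingUtil reports the minicluster is up with one master and one region server. A minimal sketch of how such a single-node test cluster is usually brought up with HBaseTestingUtil follows; whatever extra configuration this particular test applies before starting is not visible in the log and is left out:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseTestingUtil util = new HBaseTestingUtil(conf);
    util.startMiniCluster(1);        // one master + one region server, as in this log
    try {
      // test body would create tables and write data here
    } finally {
      util.shutdownMiniCluster();    // stops HBase plus the backing mini DFS and ZooKeeper
    }
  }
}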
2024-11-22T13:33:59,655 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c233f9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:33:59,658 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-22T13:33:59,658 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-22T13:33:59,662 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e025332d312f,41345,-1 for getting cluster id 2024-11-22T13:33:59,666 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T13:33:59,679 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '62d83833-31e1-4385-bf9d-562f4984ac94' 2024-11-22T13:33:59,682 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T13:33:59,683 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "62d83833-31e1-4385-bf9d-562f4984ac94" 2024-11-22T13:33:59,683 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c9db71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:33:59,683 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e025332d312f,41345,-1] 2024-11-22T13:33:59,687 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T13:33:59,689 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:33:59,691 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59926, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T13:33:59,694 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@783fe34b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:33:59,695 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:33:59,702 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,45803,1732282437327, seqNum=-1] 2024-11-22T13:33:59,702 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:33:59,705 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43438, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:33:59,722 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=e025332d312f,41345,1732282436551 2024-11-22T13:33:59,723 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:33:59,729 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T13:33:59,734 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T13:33:59,738 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is e025332d312f,41345,1732282436551 2024-11-22T13:33:59,742 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@36ee107f 2024-11-22T13:33:59,743 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T13:33:59,745 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59936, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T13:33:59,747 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41345 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T13:33:59,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41345 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
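The two TableDescriptorChecker warnings above show the table being created with a deliberately tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes), which lets the test force splits and flushes quickly. A sketch of a descriptor that would produce those warnings is shown below; whether this test sets the values on the descriptor or via the hbase.hregion.max.filesize / hbase.hregion.memstore.flush.size keys named in the warnings is an assumption:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class SmallTableDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
        .setMaxFileSize(786432L)        // MAX_FILESIZE from the first warning
        .setMemStoreFlushSize(8192L)    // MEMSTORE_FLUSHSIZE from the second warning
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
        .build();
    // Passing a descriptor like this to Admin.createTable(...) is what leads to the
    // "Client=jenkins//... create 'TestLogRolling-testSlowSyncLogRolling'" entry below.
    System.out.println(td);
  }
}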
2024-11-22T13:33:59,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41345 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:33:59,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41345 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-22T13:33:59,761 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T13:33:59,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41345 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-22T13:33:59,763 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:33:59,765 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T13:33:59,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41345 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T13:33:59,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741835_1011 (size=389) 2024-11-22T13:33:59,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741835_1011 (size=389) 2024-11-22T13:33:59,828 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ff049cea83bd336017598e377fcd90f8, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f 2024-11-22T13:33:59,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741836_1012 (size=72) 2024-11-22T13:33:59,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741836_1012 (size=72) 2024-11-22T13:33:59,841 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:33:59,841 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing ff049cea83bd336017598e377fcd90f8, disabling compactions & flushes 2024-11-22T13:33:59,841 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:33:59,841 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:33:59,841 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. after waiting 0 ms 2024-11-22T13:33:59,841 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:33:59,841 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:33:59,841 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for ff049cea83bd336017598e377fcd90f8: Waiting for close lock at 1732282439841Disabling compacts and flushes for region at 1732282439841Disabling writes for close at 1732282439841Writing region close event to WAL at 1732282439841Closed at 1732282439841 2024-11-22T13:33:59,844 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T13:33:59,850 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732282439844"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732282439844"}]},"ts":"1732282439844"} 2024-11-22T13:33:59,855 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T13:33:59,857 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T13:33:59,860 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282439857"}]},"ts":"1732282439857"} 2024-11-22T13:33:59,864 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-22T13:33:59,866 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ff049cea83bd336017598e377fcd90f8, ASSIGN}] 2024-11-22T13:33:59,868 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ff049cea83bd336017598e377fcd90f8, ASSIGN 2024-11-22T13:33:59,870 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ff049cea83bd336017598e377fcd90f8, ASSIGN; state=OFFLINE, location=e025332d312f,45803,1732282437327; forceNewPlan=false, retain=false 2024-11-22T13:34:00,021 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ff049cea83bd336017598e377fcd90f8, regionState=OPENING, regionLocation=e025332d312f,45803,1732282437327 2024-11-22T13:34:00,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ff049cea83bd336017598e377fcd90f8, ASSIGN because future has completed 2024-11-22T13:34:00,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff049cea83bd336017598e377fcd90f8, server=e025332d312f,45803,1732282437327}] 2024-11-22T13:34:00,199 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 
2024-11-22T13:34:00,200 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => ff049cea83bd336017598e377fcd90f8, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:34:00,201 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,201 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:34:00,201 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,201 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,205 INFO [StoreOpener-ff049cea83bd336017598e377fcd90f8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,207 INFO [StoreOpener-ff049cea83bd336017598e377fcd90f8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ff049cea83bd336017598e377fcd90f8 columnFamilyName info 2024-11-22T13:34:00,208 DEBUG [StoreOpener-ff049cea83bd336017598e377fcd90f8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:34:00,209 INFO [StoreOpener-ff049cea83bd336017598e377fcd90f8-1 {}] regionserver.HStore(327): Store=ff049cea83bd336017598e377fcd90f8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:34:00,209 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,211 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,212 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,213 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,213 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,217 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,220 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:34:00,221 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened ff049cea83bd336017598e377fcd90f8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762968, jitterRate=-0.029837116599082947}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T13:34:00,222 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:00,223 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for ff049cea83bd336017598e377fcd90f8: Running coprocessor pre-open hook at 1732282440202Writing region info on filesystem at 1732282440202Initializing all the Stores at 1732282440204 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282440204Cleaning up temporary data from old regions at 1732282440213 (+9 ms)Running coprocessor post-open hooks at 1732282440222 (+9 ms)Region opened successfully at 1732282440223 (+1 ms) 2024-11-22T13:34:00,225 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8., pid=6, masterSystemTime=1732282440187 2024-11-22T13:34:00,229 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:34:00,229 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:34:00,230 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=ff049cea83bd336017598e377fcd90f8, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,45803,1732282437327 2024-11-22T13:34:00,234 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure ff049cea83bd336017598e377fcd90f8, server=e025332d312f,45803,1732282437327 because future has completed 2024-11-22T13:34:00,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T13:34:00,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure ff049cea83bd336017598e377fcd90f8, server=e025332d312f,45803,1732282437327 in 204 msec 2024-11-22T13:34:00,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T13:34:00,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=ff049cea83bd336017598e377fcd90f8, ASSIGN in 374 msec 2024-11-22T13:34:00,246 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T13:34:00,246 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282440246"}]},"ts":"1732282440246"} 2024-11-22T13:34:00,250 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-22T13:34:00,252 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T13:34:00,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 499 msec 2024-11-22T13:34:04,821 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T13:34:04,871 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T13:34:04,873 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-22T13:34:06,899 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T13:34:06,900 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T13:34:06,905 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T13:34:06,905 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T13:34:06,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:34:06,906 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T13:34:06,906 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T13:34:06,907 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T13:34:09,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41345 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T13:34:09,870 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-22T13:34:09,874 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-22T13:34:09,881 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-22T13:34:09,882 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 
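Once the CREATE operation completes, the "Time-limited test" thread starts rolling the WAL and writing data: the roll just below archives a WAL with a single entry, and the later flush records seven cells of roughly 1 KB each under row0001..., family info (about 7.36 KB total). The client-side write pattern sketched here is only an approximation inferred from those flush entries, not code copied from the test source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class WritePutsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    byte[] family = Bytes.toBytes("info");
    byte[] value = new byte[1024];                 // ~1 KB per cell, matching the ~1080-byte cells flushed below

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn)) {
      for (int i = 1; i <= 7; i++) {               // seven puts -> the ~7.36 KB / 7-entry flush seen below
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(family, Bytes.toBytes(""), value);   // empty qualifier, as in "row0001/info:/..."
        table.put(put);
      }
    }
  }
}

In the log that follows, those writes accumulate in the memstore until the small flush size configured for this table triggers the flush of 32612a2e4cfd4192a3f41de80a6e40e7 at sequenceid=11.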
2024-11-22T13:34:09,882 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282449882 2024-11-22T13:34:09,892 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:09,892 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:09,892 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:09,892 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:09,892 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:09,893 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282438806 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282449882 2024-11-22T13:34:09,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741833_1009 (size=451) 2024-11-22T13:34:09,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741833_1009 (size=451) 2024-11-22T13:34:09,901 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:34:09,901 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282438806 to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs/e025332d312f%2C45803%2C1732282437327.1732282438806 2024-11-22T13:34:09,909 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8., hostname=e025332d312f,45803,1732282437327, seqNum=2] 2024-11-22T13:34:21,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45803 {}] regionserver.HRegion(8855): Flush requested on ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:21,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ff049cea83bd336017598e377fcd90f8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:34:22,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/32612a2e4cfd4192a3f41de80a6e40e7 is 1080, key is row0001/info:/1732282449912/Put/seqid=0 2024-11-22T13:34:22,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741838_1014 (size=12509) 2024-11-22T13:34:22,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741838_1014 (size=12509) 2024-11-22T13:34:22,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/32612a2e4cfd4192a3f41de80a6e40e7 2024-11-22T13:34:22,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/32612a2e4cfd4192a3f41de80a6e40e7 as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/32612a2e4cfd4192a3f41de80a6e40e7 2024-11-22T13:34:22,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/32612a2e4cfd4192a3f41de80a6e40e7, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T13:34:22,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ff049cea83bd336017598e377fcd90f8 in 139ms, sequenceid=11, compaction requested=false 2024-11-22T13:34:22,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ff049cea83bd336017598e377fcd90f8: 2024-11-22T13:34:25,665 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T13:34:29,981 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282469981 2024-11-22T13:34:30,200 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:30,200 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:30,200 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:30,201 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:30,201 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:30,201 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:30,201 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282449882 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282469981 2024-11-22T13:34:30,202 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:34:30,203 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282449882 is not closed yet, will try archiving it next time 2024-11-22T13:34:30,204 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741837_1013 (size=12399) 2024-11-22T13:34:30,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741837_1013 (size=12399) 2024-11-22T13:34:30,407 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:32,614 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:34,820 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:37,028 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:37,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45803 {}] regionserver.HRegion(8855): Flush requested on ff049cea83bd336017598e377fcd90f8 2024-11-22T13:34:37,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ff049cea83bd336017598e377fcd90f8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:34:37,232 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:37,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/a1afd8cc002e492abcb055005221631e is 1080, key is row0008/info:/1732282463960/Put/seqid=0 2024-11-22T13:34:37,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741840_1016 (size=12509) 2024-11-22T13:34:37,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741840_1016 (size=12509) 2024-11-22T13:34:37,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/a1afd8cc002e492abcb055005221631e 2024-11-22T13:34:37,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/a1afd8cc002e492abcb055005221631e as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/a1afd8cc002e492abcb055005221631e 2024-11-22T13:34:37,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/a1afd8cc002e492abcb055005221631e, entries=7, sequenceid=21, filesize=12.2 K 2024-11-22T13:34:37,475 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:37,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ff049cea83bd336017598e377fcd90f8 in 447ms, sequenceid=21, compaction requested=false 2024-11-22T13:34:37,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ff049cea83bd336017598e377fcd90f8: 2024-11-22T13:34:37,476 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-22T13:34:37,477 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:34:37,478 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/32612a2e4cfd4192a3f41de80a6e40e7 because midkey is the same as first or last row 2024-11-22T13:34:39,236 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:40,199 INFO [master/e025332d312f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T13:34:40,199 INFO [master/e025332d312f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-22T13:34:41,444 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:41,448 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:41,450 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C45803%2C1732282437327:(num 1732282469981) roll requested 2024-11-22T13:34:41,451 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282481450 2024-11-22T13:34:41,664 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:41,665 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:41,665 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:41,665 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:41,665 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:41,666 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:41,666 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282469981 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282481450 2024-11-22T13:34:41,667 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:34:41,667 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282469981 is not closed yet, will try archiving it next time 2024-11-22T13:34:41,667 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282449882 to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs/e025332d312f%2C45803%2C1732282437327.1732282449882 2024-11-22T13:34:41,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741839_1015 (size=7739) 2024-11-22T13:34:41,669 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741839_1015 (size=7739) 2024-11-22T13:34:43,651 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:45,201 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ff049cea83bd336017598e377fcd90f8, had cached 0 bytes from a total of 25018 2024-11-22T13:34:45,857 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:48,095 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 234 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:50,303 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:52,307 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T13:34:52,308 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282492308 2024-11-22T13:34:55,666 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T13:34:57,325 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5013 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:57,328 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5013 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK], DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK]] 2024-11-22T13:34:57,328 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C45803%2C1732282437327:(num 1732282492308) roll requested 2024-11-22T13:34:57,328 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:57,328 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:57,328 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:57,329 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:57,329 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:34:57,329 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282481450 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282492308 2024-11-22T13:34:57,330 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38207:38207),(127.0.0.1/127.0.0.1:42009:42009)] 2024-11-22T13:34:57,330 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282481450 is not closed yet, will try archiving it next time 2024-11-22T13:34:57,331 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282497330 2024-11-22T13:34:57,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741841_1017 (size=4753) 2024-11-22T13:34:57,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741841_1017 (size=4753) 2024-11-22T13:35:02,335 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:02,336 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:02,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45803 {}] regionserver.HRegion(8855): Flush requested on ff049cea83bd336017598e377fcd90f8 2024-11-22T13:35:02,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ff049cea83bd336017598e377fcd90f8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:35:02,347 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:02,348 WARN [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:04,338 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T13:35:07,342 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:07,342 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:07,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:07,343 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:07,344 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:07,344 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:07,345 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:07,346 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282492308 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282497330 2024-11-22T13:35:07,348 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38207:38207),(127.0.0.1/127.0.0.1:42009:42009)] 2024-11-22T13:35:07,348 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282492308 is not closed yet, will try archiving it next time 2024-11-22T13:35:07,348 
DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C45803%2C1732282437327:(num 1732282497330) roll requested 2024-11-22T13:35:07,349 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282507349 2024-11-22T13:35:07,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741842_1018 (size=1569) 2024-11-22T13:35:07,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741842_1018 (size=1569) 2024-11-22T13:35:07,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/7beed4fbe77445e9b55b92afa760d54d is 1080, key is row0015/info:/1732282479032/Put/seqid=0 2024-11-22T13:35:07,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741844_1020 (size=12509) 2024-11-22T13:35:07,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741844_1020 (size=12509) 2024-11-22T13:35:07,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/7beed4fbe77445e9b55b92afa760d54d 2024-11-22T13:35:07,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/7beed4fbe77445e9b55b92afa760d54d as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/7beed4fbe77445e9b55b92afa760d54d 2024-11-22T13:35:07,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/7beed4fbe77445e9b55b92afa760d54d, entries=7, sequenceid=31, filesize=12.2 K 2024-11-22T13:35:12,366 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:12,366 WARN [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:12,384 INFO [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:12,384 WARN [FSHLog-0-hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f-prefix:e025332d312f,45803,1732282437327 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42491,DS-cfd61f6e-c2a4-464e-8f42-a58012a0fea3,DISK], DatanodeInfoWithStorage[127.0.0.1:42593,DS-77e78628-9332-4b63-a99f-3ec4777b621f,DISK]] 2024-11-22T13:35:12,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ff049cea83bd336017598e377fcd90f8 in 10048ms, sequenceid=31, compaction requested=true 2024-11-22T13:35:12,384 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ff049cea83bd336017598e377fcd90f8: 2024-11-22T13:35:12,384 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,385 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-22T13:35:12,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,385 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:35:12,385 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,385 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/32612a2e4cfd4192a3f41de80a6e40e7 because midkey is the same as first or last row 2024-11-22T13:35:12,385 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,385 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282497330 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282507349 2024-11-22T13:35:12,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ff049cea83bd336017598e377fcd90f8:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:35:12,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741843_1019 (size=438) 2024-11-22T13:35:12,388 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:35:12,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741843_1019 (size=438) 2024-11-22T13:35:12,388 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282497330 is not closed yet, will try archiving it next time 2024-11-22T13:35:12,388 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282469981 to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs/e025332d312f%2C45803%2C1732282437327.1732282469981 2024-11-22T13:35:12,388 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C45803%2C1732282437327:(num 1732282507349) roll requested 2024-11-22T13:35:12,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:35:12,388 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282512388 2024-11-22T13:35:12,389 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:35:12,390 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282481450 to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs/e025332d312f%2C45803%2C1732282437327.1732282481450 2024-11-22T13:35:12,392 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282492308 to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs/e025332d312f%2C45803%2C1732282437327.1732282492308 2024-11-22T13:35:12,392 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:35:12,394 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282497330 to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs/e025332d312f%2C45803%2C1732282437327.1732282497330 2024-11-22T13:35:12,394 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.HStore(1541): ff049cea83bd336017598e377fcd90f8/info is initiating minor compaction (all files) 2024-11-22T13:35:12,395 INFO [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of ff049cea83bd336017598e377fcd90f8/info in TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 
2024-11-22T13:35:12,395 INFO [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/32612a2e4cfd4192a3f41de80a6e40e7, hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/a1afd8cc002e492abcb055005221631e, hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/7beed4fbe77445e9b55b92afa760d54d] into tmpdir=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp, totalSize=36.6 K 2024-11-22T13:35:12,396 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 32612a2e4cfd4192a3f41de80a6e40e7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732282449912 2024-11-22T13:35:12,397 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,397 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,397 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,397 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,397 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] compactions.Compactor(225): Compacting a1afd8cc002e492abcb055005221631e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732282463960 2024-11-22T13:35:12,397 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,398 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282507349 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282512388 2024-11-22T13:35:12,398 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7beed4fbe77445e9b55b92afa760d54d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732282479032 2024-11-22T13:35:12,398 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:35:12,398 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282507349 is not closed yet, will try archiving it next time 2024-11-22T13:35:12,399 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45803%2C1732282437327.1732282512399 2024-11-22T13:35:12,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741845_1021 (size=93) 2024-11-22T13:35:12,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to 
blk_1073741845_1021 (size=93) 2024-11-22T13:35:12,401 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282507349 to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs/e025332d312f%2C45803%2C1732282437327.1732282507349 2024-11-22T13:35:12,406 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,406 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,407 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,407 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,407 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:12,407 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282512388 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/WALs/e025332d312f,45803,1732282437327/e025332d312f%2C45803%2C1732282437327.1732282512399 2024-11-22T13:35:12,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741846_1022 (size=1258) 2024-11-22T13:35:12,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741846_1022 (size=1258) 2024-11-22T13:35:12,418 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42009:42009),(127.0.0.1/127.0.0.1:38207:38207)] 2024-11-22T13:35:12,433 INFO [RS:0;e025332d312f:45803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ff049cea83bd336017598e377fcd90f8#info#compaction#3 average throughput is 7.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:35:12,434 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/22946e2c04424c85abf0268918b1d29f is 1080, key is row0001/info:/1732282449912/Put/seqid=0 2024-11-22T13:35:12,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741848_1024 (size=27710) 2024-11-22T13:35:12,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741848_1024 (size=27710) 2024-11-22T13:35:12,454 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/22946e2c04424c85abf0268918b1d29f as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/22946e2c04424c85abf0268918b1d29f 2024-11-22T13:35:12,472 INFO [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in ff049cea83bd336017598e377fcd90f8/info of ff049cea83bd336017598e377fcd90f8 into 22946e2c04424c85abf0268918b1d29f(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T13:35:12,472 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for ff049cea83bd336017598e377fcd90f8: 2024-11-22T13:35:12,474 INFO [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8., storeName=ff049cea83bd336017598e377fcd90f8/info, priority=13, startTime=1732282512386; duration=0sec 2024-11-22T13:35:12,474 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T13:35:12,474 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:35:12,474 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/22946e2c04424c85abf0268918b1d29f because midkey is the same as first or last row 2024-11-22T13:35:12,474 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T13:35:12,474 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:35:12,474 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/22946e2c04424c85abf0268918b1d29f because midkey is the same as first or last row 2024-11-22T13:35:12,475 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T13:35:12,475 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:35:12,475 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/22946e2c04424c85abf0268918b1d29f because midkey is the same as first or last row 2024-11-22T13:35:12,475 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:35:12,475 DEBUG [RS:0;e025332d312f:45803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ff049cea83bd336017598e377fcd90f8:info 2024-11-22T13:35:24,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45803 {}] regionserver.HRegion(8855): Flush requested on ff049cea83bd336017598e377fcd90f8 2024-11-22T13:35:24,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing ff049cea83bd336017598e377fcd90f8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:35:24,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/914863781801474ab795e112891ea364 is 1080, key is row0022/info:/1732282512400/Put/seqid=0 2024-11-22T13:35:24,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741849_1025 (size=12509) 2024-11-22T13:35:24,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741849_1025 (size=12509) 2024-11-22T13:35:24,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/914863781801474ab795e112891ea364 2024-11-22T13:35:24,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/914863781801474ab795e112891ea364 as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/914863781801474ab795e112891ea364 2024-11-22T13:35:24,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/914863781801474ab795e112891ea364, entries=7, sequenceid=42, filesize=12.2 K 2024-11-22T13:35:24,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for ff049cea83bd336017598e377fcd90f8 in 40ms, sequenceid=42, compaction requested=false 2024-11-22T13:35:24,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for ff049cea83bd336017598e377fcd90f8: 2024-11-22T13:35:24,484 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-22T13:35:24,484 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:35:24,484 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/22946e2c04424c85abf0268918b1d29f because midkey is the same as first or last row 2024-11-22T13:35:25,666 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T13:35:30,202 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ff049cea83bd336017598e377fcd90f8, had cached 0 bytes from a total of 40219 2024-11-22T13:35:32,468 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T13:35:32,469 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T13:35:32,470 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:35:32,481 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:32,482 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:32,482 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T13:35:32,482 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T13:35:32,482 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=764753395, stopped=false 2024-11-22T13:35:32,482 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e025332d312f,41345,1732282436551 2024-11-22T13:35:32,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:35:32,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:35:32,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:32,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:32,539 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:35:32,539 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T13:35:32,540 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:35:32,540 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:35:32,540 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:35:32,540 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:32,541 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e025332d312f,45803,1732282437327' ***** 2024-11-22T13:35:32,542 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T13:35:32,542 INFO [RS:0;e025332d312f:45803 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T13:35:32,543 INFO [RS:0;e025332d312f:45803 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T13:35:32,543 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T13:35:32,543 INFO [RS:0;e025332d312f:45803 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T13:35:32,543 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(3091): Received CLOSE for ff049cea83bd336017598e377fcd90f8 2024-11-22T13:35:32,544 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(959): stopping server e025332d312f,45803,1732282437327 2024-11-22T13:35:32,544 INFO [RS:0;e025332d312f:45803 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:35:32,544 INFO [RS:0;e025332d312f:45803 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e025332d312f:45803. 
2024-11-22T13:35:32,544 DEBUG [RS:0;e025332d312f:45803 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:35:32,544 DEBUG [RS:0;e025332d312f:45803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:32,544 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing ff049cea83bd336017598e377fcd90f8, disabling compactions & flushes 2024-11-22T13:35:32,545 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:35:32,545 INFO [RS:0;e025332d312f:45803 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T13:35:32,545 INFO [RS:0;e025332d312f:45803 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T13:35:32,545 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:35:32,545 INFO [RS:0;e025332d312f:45803 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T13:35:32,545 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. after waiting 0 ms 2024-11-22T13:35:32,545 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T13:35:32,545 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 
2024-11-22T13:35:32,545 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing ff049cea83bd336017598e377fcd90f8 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-22T13:35:32,545 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T13:35:32,546 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:35:32,546 DEBUG [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1325): Online Regions={ff049cea83bd336017598e377fcd90f8=TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T13:35:32,546 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:35:32,546 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:35:32,546 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:35:32,546 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:35:32,546 DEBUG [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, ff049cea83bd336017598e377fcd90f8 2024-11-22T13:35:32,546 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-22T13:35:32,551 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/121aade836fc471eb4d7bd574c2c256b is 1080, key is row0029/info:/1732282526447/Put/seqid=0 2024-11-22T13:35:32,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741850_1026 (size=8193) 2024-11-22T13:35:32,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741850_1026 (size=8193) 2024-11-22T13:35:32,558 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/121aade836fc471eb4d7bd574c2c256b 2024-11-22T13:35:32,567 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/.tmp/info/121aade836fc471eb4d7bd574c2c256b as 
hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/121aade836fc471eb4d7bd574c2c256b 2024-11-22T13:35:32,568 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/info/62ab55802376477caf4064b42fb08f62 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8./info:regioninfo/1732282440230/Put/seqid=0 2024-11-22T13:35:32,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741851_1027 (size=7016) 2024-11-22T13:35:32,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741851_1027 (size=7016) 2024-11-22T13:35:32,575 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/info/62ab55802376477caf4064b42fb08f62 2024-11-22T13:35:32,577 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/121aade836fc471eb4d7bd574c2c256b, entries=3, sequenceid=48, filesize=8.0 K 2024-11-22T13:35:32,579 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ff049cea83bd336017598e377fcd90f8 in 33ms, sequenceid=48, compaction requested=true 2024-11-22T13:35:32,579 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/32612a2e4cfd4192a3f41de80a6e40e7, hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/a1afd8cc002e492abcb055005221631e, hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/7beed4fbe77445e9b55b92afa760d54d] to archive 2024-11-22T13:35:32,583 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T13:35:32,587 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/32612a2e4cfd4192a3f41de80a6e40e7 to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/32612a2e4cfd4192a3f41de80a6e40e7 2024-11-22T13:35:32,589 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/a1afd8cc002e492abcb055005221631e to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/a1afd8cc002e492abcb055005221631e 2024-11-22T13:35:32,590 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/7beed4fbe77445e9b55b92afa760d54d to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/info/7beed4fbe77445e9b55b92afa760d54d 2024-11-22T13:35:32,599 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/ns/13d4dec902624141884ea4360988a0df is 43, key is default/ns:d/1732282439531/Put/seqid=0 2024-11-22T13:35:32,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741852_1028 (size=5153) 2024-11-22T13:35:32,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741852_1028 (size=5153) 2024-11-22T13:35:32,606 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/ns/13d4dec902624141884ea4360988a0df 2024-11-22T13:35:32,602 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e025332d312f:41345 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-22T13:35:32,607 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [32612a2e4cfd4192a3f41de80a6e40e7=12509, a1afd8cc002e492abcb055005221631e=12509, 7beed4fbe77445e9b55b92afa760d54d=12509] 2024-11-22T13:35:32,613 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/default/TestLogRolling-testSlowSyncLogRolling/ff049cea83bd336017598e377fcd90f8/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-22T13:35:32,616 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 2024-11-22T13:35:32,616 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for ff049cea83bd336017598e377fcd90f8: Waiting for close lock at 1732282532544Running coprocessor pre-close hooks at 1732282532544Disabling compacts and flushes for region at 1732282532544Disabling writes for close at 1732282532545 (+1 ms)Obtaining lock to block concurrent updates at 1732282532545Preparing flush snapshotting stores in ff049cea83bd336017598e377fcd90f8 at 1732282532545Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732282532546 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. at 1732282532547 (+1 ms)Flushing ff049cea83bd336017598e377fcd90f8/info: creating writer at 1732282532547Flushing ff049cea83bd336017598e377fcd90f8/info: appending metadata at 1732282532551 (+4 ms)Flushing ff049cea83bd336017598e377fcd90f8/info: closing flushed file at 1732282532551Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68318faf: reopening flushed file at 1732282532566 (+15 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for ff049cea83bd336017598e377fcd90f8 in 33ms, sequenceid=48, compaction requested=true at 1732282532579 (+13 ms)Writing region close event to WAL at 1732282532608 (+29 ms)Running coprocessor post-close hooks at 1732282532614 (+6 ms)Closed at 1732282532616 (+2 ms) 2024-11-22T13:35:32,617 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732282439747.ff049cea83bd336017598e377fcd90f8. 
2024-11-22T13:35:32,632 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/table/e22d28d30a50415d8bf4a43214fb7cd8 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732282440246/Put/seqid=0 2024-11-22T13:35:32,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741853_1029 (size=5396) 2024-11-22T13:35:32,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741853_1029 (size=5396) 2024-11-22T13:35:32,639 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/table/e22d28d30a50415d8bf4a43214fb7cd8 2024-11-22T13:35:32,647 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/info/62ab55802376477caf4064b42fb08f62 as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/info/62ab55802376477caf4064b42fb08f62 2024-11-22T13:35:32,652 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T13:35:32,652 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T13:35:32,655 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/info/62ab55802376477caf4064b42fb08f62, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T13:35:32,657 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/ns/13d4dec902624141884ea4360988a0df as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/ns/13d4dec902624141884ea4360988a0df 2024-11-22T13:35:32,659 INFO [regionserver/e025332d312f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:35:32,666 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/ns/13d4dec902624141884ea4360988a0df, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T13:35:32,668 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/.tmp/table/e22d28d30a50415d8bf4a43214fb7cd8 as 
hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/table/e22d28d30a50415d8bf4a43214fb7cd8 2024-11-22T13:35:32,676 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/table/e22d28d30a50415d8bf4a43214fb7cd8, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T13:35:32,678 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false 2024-11-22T13:35:32,684 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T13:35:32,685 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:35:32,685 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:35:32,685 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282532545Running coprocessor pre-close hooks at 1732282532545Disabling compacts and flushes for region at 1732282532545Disabling writes for close at 1732282532546 (+1 ms)Obtaining lock to block concurrent updates at 1732282532546Preparing flush snapshotting stores in 1588230740 at 1732282532546Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732282532547 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732282532548 (+1 ms)Flushing 1588230740/info: creating writer at 1732282532548Flushing 1588230740/info: appending metadata at 1732282532568 (+20 ms)Flushing 1588230740/info: closing flushed file at 1732282532568Flushing 1588230740/ns: creating writer at 1732282532584 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732282532599 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732282532599Flushing 1588230740/table: creating writer at 1732282532615 (+16 ms)Flushing 1588230740/table: appending metadata at 1732282532631 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732282532631Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39894fd3: reopening flushed file at 1732282532646 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49fd7abf: reopening flushed file at 1732282532655 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e825b60: reopening flushed file at 1732282532666 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 132ms, sequenceid=11, compaction requested=false at 1732282532678 (+12 ms)Writing region close event to WAL at 1732282532679 (+1 ms)Running coprocessor post-close hooks at 1732282532685 (+6 ms)Closed at 1732282532685 2024-11-22T13:35:32,685 DEBUG 
[RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T13:35:32,746 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(976): stopping server e025332d312f,45803,1732282437327; all regions closed. 2024-11-22T13:35:32,748 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,748 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,748 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,749 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,749 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741834_1010 (size=3066) 2024-11-22T13:35:32,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741834_1010 (size=3066) 2024-11-22T13:35:32,758 DEBUG [RS:0;e025332d312f:45803 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs 2024-11-22T13:35:32,758 INFO [RS:0;e025332d312f:45803 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C45803%2C1732282437327.meta:.meta(num 1732282439277) 2024-11-22T13:35:32,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:32,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741847_1023 (size=12695) 2024-11-22T13:35:32,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741847_1023 (size=12695) 2024-11-22T13:35:32,765 DEBUG [RS:0;e025332d312f:45803 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/oldWALs 2024-11-22T13:35:32,765 INFO [RS:0;e025332d312f:45803 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C45803%2C1732282437327:(num 1732282512399) 2024-11-22T13:35:32,765 DEBUG [RS:0;e025332d312f:45803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:32,765 INFO [RS:0;e025332d312f:45803 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:35:32,765 INFO [RS:0;e025332d312f:45803 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:35:32,765 INFO [RS:0;e025332d312f:45803 {}] hbase.ChoreService(370): Chore service for: regionserver/e025332d312f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T13:35:32,766 INFO [RS:0;e025332d312f:45803 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:35:32,766 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T13:35:32,766 INFO [RS:0;e025332d312f:45803 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45803 2024-11-22T13:35:32,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:35:32,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e025332d312f,45803,1732282437327 2024-11-22T13:35:32,779 INFO [RS:0;e025332d312f:45803 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:35:32,780 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e025332d312f,45803,1732282437327] 2024-11-22T13:35:32,874 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e025332d312f,45803,1732282437327 already deleted, retry=false 2024-11-22T13:35:32,875 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e025332d312f,45803,1732282437327 expired; onlineServers=0 2024-11-22T13:35:32,875 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e025332d312f,41345,1732282436551' ***** 2024-11-22T13:35:32,876 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T13:35:32,876 INFO [M:0;e025332d312f:41345 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:35:32,876 INFO [M:0;e025332d312f:41345 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:35:32,877 DEBUG [M:0;e025332d312f:41345 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T13:35:32,877 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T13:35:32,877 DEBUG [M:0;e025332d312f:41345 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T13:35:32,877 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282438468 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282438468,5,FailOnTimeoutGroup] 2024-11-22T13:35:32,877 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282438467 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282438467,5,FailOnTimeoutGroup] 2024-11-22T13:35:32,878 INFO [M:0;e025332d312f:41345 {}] hbase.ChoreService(370): Chore service for: master/e025332d312f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T13:35:32,878 INFO [M:0;e025332d312f:41345 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:35:32,879 DEBUG [M:0;e025332d312f:41345 {}] master.HMaster(1795): Stopping service threads 2024-11-22T13:35:32,879 INFO [M:0;e025332d312f:41345 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T13:35:32,879 INFO [M:0;e025332d312f:41345 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:35:32,880 INFO [M:0;e025332d312f:41345 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T13:35:32,880 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T13:35:32,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T13:35:32,891 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:32,891 DEBUG [M:0;e025332d312f:41345 {}] zookeeper.ZKUtil(347): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T13:35:32,891 WARN [M:0;e025332d312f:41345 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T13:35:32,892 INFO [M:0;e025332d312f:41345 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/.lastflushedseqids 2024-11-22T13:35:32,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741854_1030 (size=130) 2024-11-22T13:35:32,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741854_1030 (size=130) 2024-11-22T13:35:32,903 INFO [M:0;e025332d312f:41345 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T13:35:32,903 INFO [M:0;e025332d312f:41345 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T13:35:32,903 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:35:32,903 INFO [M:0;e025332d312f:41345 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:32,903 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:32,903 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:35:32,904 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:32,904 INFO [M:0;e025332d312f:41345 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-22T13:35:32,921 DEBUG [M:0;e025332d312f:41345 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/76ad84ae09b24d9bb474ff1f2849f354 is 82, key is hbase:meta,,1/info:regioninfo/1732282439350/Put/seqid=0 2024-11-22T13:35:32,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741855_1031 (size=5672) 2024-11-22T13:35:32,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741855_1031 (size=5672) 2024-11-22T13:35:32,928 INFO [M:0;e025332d312f:41345 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/76ad84ae09b24d9bb474ff1f2849f354 2024-11-22T13:35:32,951 DEBUG [M:0;e025332d312f:41345 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58d3be52c81041a881bf115731b744e7 is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732282440254/Put/seqid=0 2024-11-22T13:35:32,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741856_1032 (size=6248) 2024-11-22T13:35:32,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741856_1032 (size=6248) 2024-11-22T13:35:32,957 INFO [M:0;e025332d312f:41345 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58d3be52c81041a881bf115731b744e7 2024-11-22T13:35:32,963 INFO [M:0;e025332d312f:41345 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 58d3be52c81041a881bf115731b744e7 2024-11-22T13:35:32,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:35:32,965 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45803-0x10162c0443a0001, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:35:32,965 INFO [RS:0;e025332d312f:45803 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:35:32,965 INFO [RS:0;e025332d312f:45803 {}] regionserver.HRegionServer(1031): Exiting; stopping=e025332d312f,45803,1732282437327; zookeeper connection closed. 2024-11-22T13:35:32,966 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@66b8947b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@66b8947b 2024-11-22T13:35:32,966 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T13:35:32,983 DEBUG [M:0;e025332d312f:41345 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7fd13ef093448b18ea8b0206b30ebb3 is 69, key is e025332d312f,45803,1732282437327/rs:state/1732282438572/Put/seqid=0 2024-11-22T13:35:32,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741857_1033 (size=5156) 2024-11-22T13:35:32,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741857_1033 (size=5156) 2024-11-22T13:35:32,990 INFO [M:0;e025332d312f:41345 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7fd13ef093448b18ea8b0206b30ebb3 2024-11-22T13:35:33,014 DEBUG [M:0;e025332d312f:41345 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dc468e12780f465bb8bff49d7c37c107 is 52, key is load_balancer_on/state:d/1732282439726/Put/seqid=0 2024-11-22T13:35:33,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741858_1034 (size=5056) 2024-11-22T13:35:33,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741858_1034 (size=5056) 2024-11-22T13:35:33,021 INFO [M:0;e025332d312f:41345 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dc468e12780f465bb8bff49d7c37c107 2024-11-22T13:35:33,030 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/76ad84ae09b24d9bb474ff1f2849f354 as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/76ad84ae09b24d9bb474ff1f2849f354 2024-11-22T13:35:33,037 INFO [M:0;e025332d312f:41345 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/76ad84ae09b24d9bb474ff1f2849f354, entries=8, sequenceid=59, filesize=5.5 K 2024-11-22T13:35:33,039 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/58d3be52c81041a881bf115731b744e7 as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/58d3be52c81041a881bf115731b744e7 2024-11-22T13:35:33,046 INFO [M:0;e025332d312f:41345 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 58d3be52c81041a881bf115731b744e7 2024-11-22T13:35:33,046 INFO [M:0;e025332d312f:41345 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/58d3be52c81041a881bf115731b744e7, entries=6, sequenceid=59, filesize=6.1 K 2024-11-22T13:35:33,048 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f7fd13ef093448b18ea8b0206b30ebb3 as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7fd13ef093448b18ea8b0206b30ebb3 2024-11-22T13:35:33,055 INFO [M:0;e025332d312f:41345 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f7fd13ef093448b18ea8b0206b30ebb3, entries=1, sequenceid=59, filesize=5.0 K 2024-11-22T13:35:33,056 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dc468e12780f465bb8bff49d7c37c107 as hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dc468e12780f465bb8bff49d7c37c107 2024-11-22T13:35:33,062 INFO [M:0;e025332d312f:41345 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dc468e12780f465bb8bff49d7c37c107, entries=1, sequenceid=59, filesize=4.9 K 2024-11-22T13:35:33,064 INFO [M:0;e025332d312f:41345 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 159ms, sequenceid=59, compaction requested=false 2024-11-22T13:35:33,065 INFO [M:0;e025332d312f:41345 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T13:35:33,065 DEBUG [M:0;e025332d312f:41345 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282532903Disabling compacts and flushes for region at 1732282532903Disabling writes for close at 1732282532903Obtaining lock to block concurrent updates at 1732282532904 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732282532904Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1732282532904Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732282532905 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732282532905Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732282532921 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732282532921Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732282532935 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732282532950 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732282532950Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732282532964 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732282532982 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732282532982Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732282532997 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732282533013 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732282533014 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f65675b: reopening flushed file at 1732282533028 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@646dca1b: reopening flushed file at 1732282533038 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1884ebca: reopening flushed file at 1732282533046 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37ffda82: reopening flushed file at 1732282533055 (+9 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 159ms, sequenceid=59, compaction requested=false at 1732282533064 (+9 ms)Writing region close event to WAL at 1732282533065 (+1 ms)Closed at 1732282533065 2024-11-22T13:35:33,066 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:33,066 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:33,066 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:33,066 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:33,067 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:33,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42491 is added to blk_1073741830_1006 (size=27985) 2024-11-22T13:35:33,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42593 is added to blk_1073741830_1006 (size=27985) 2024-11-22T13:35:33,070 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T13:35:33,070 INFO [M:0;e025332d312f:41345 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T13:35:33,070 INFO [M:0;e025332d312f:41345 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41345 2024-11-22T13:35:33,071 INFO [M:0;e025332d312f:41345 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:35:33,180 INFO [M:0;e025332d312f:41345 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:35:33,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:35:33,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41345-0x10162c0443a0000, quorum=127.0.0.1:53314, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:35:33,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1467625d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:33,192 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:35:33,192 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:35:33,192 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:35:33,192 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.log.dir/,STOPPED} 2024-11-22T13:35:33,195 WARN [BP-1289964154-172.17.0.2-1732282432667 heartbeating to localhost/127.0.0.1:37741 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:35:33,195 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:35:33,195 WARN [BP-1289964154-172.17.0.2-1732282432667 heartbeating to localhost/127.0.0.1:37741 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1289964154-172.17.0.2-1732282432667 (Datanode Uuid 4b0861cf-737c-45f8-ab27-73d17d439b35) service to localhost/127.0.0.1:37741 2024-11-22T13:35:33,195 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:35:33,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/data/data3/current/BP-1289964154-172.17.0.2-1732282432667 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:33,196 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/data/data4/current/BP-1289964154-172.17.0.2-1732282432667 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:33,197 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:35:33,199 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c2fdbac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:33,200 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:35:33,200 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:35:33,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:35:33,200 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.log.dir/,STOPPED} 2024-11-22T13:35:33,201 WARN [BP-1289964154-172.17.0.2-1732282432667 heartbeating to localhost/127.0.0.1:37741 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:35:33,201 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:35:33,201 WARN [BP-1289964154-172.17.0.2-1732282432667 heartbeating to localhost/127.0.0.1:37741 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1289964154-172.17.0.2-1732282432667 (Datanode Uuid 819c0b05-e0fe-445e-b118-1dcead9b721b) service to localhost/127.0.0.1:37741 2024-11-22T13:35:33,201 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:35:33,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/data/data1/current/BP-1289964154-172.17.0.2-1732282432667 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:33,202 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/cluster_9c29dbe3-4880-8de5-018e-c4a9fb0ebcf4/data/data2/current/BP-1289964154-172.17.0.2-1732282432667 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:33,203 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:35:33,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:35:33,212 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:35:33,212 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:35:33,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:35:33,212 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.log.dir/,STOPPED} 2024-11-22T13:35:33,221 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T13:35:33,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T13:35:33,258 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=81 (was 12) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37741 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:37741 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially 
hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37741 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37741 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37741 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/e025332d312f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37741 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3e77a740 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ForkJoinPool-2-worker-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37741 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37741 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/e025332d312f:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/e025332d312f:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=58 (was 161), ProcessCount=11 (was 11), AvailableMemoryMB=2995 (was 3587) 2024-11-22T13:35:33,264 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=82, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=58, ProcessCount=11, AvailableMemoryMB=2996 2024-11-22T13:35:33,264 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T13:35:33,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.log.dir so I do NOT create it in target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede 2024-11-22T13:35:33,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/217a417c-36d9-7b0e-0b11-9a20ffd205c0/hadoop.tmp.dir so I do NOT create it in target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede 2024-11-22T13:35:33,265 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c, deleteOnExit=true 2024-11-22T13:35:33,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T13:35:33,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/test.cache.data in system properties and HBase conf 2024-11-22T13:35:33,265 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T13:35:33,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.log.dir in system properties and HBase conf 2024-11-22T13:35:33,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T13:35:33,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T13:35:33,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T13:35:33,266 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-22T13:35:33,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:35:33,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/nfs.dump.dir in system properties and HBase conf 2024-11-22T13:35:33,267 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/java.io.tmpdir in system properties and HBase conf 2024-11-22T13:35:33,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:35:33,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T13:35:33,268 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T13:35:33,282 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:35:33,653 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:33,659 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:33,661 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:33,661 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:33,661 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:35:33,663 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:33,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65506a11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:33,665 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75cbfab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:33,763 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@493d1d34{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/java.io.tmpdir/jetty-localhost-43649-hadoop-hdfs-3_4_1-tests_jar-_-any-3216317804531553911/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:35:33,764 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a249094{HTTP/1.1, (http/1.1)}{localhost:43649} 2024-11-22T13:35:33,764 INFO [Time-limited test {}] server.Server(415): Started @102944ms 2024-11-22T13:35:33,776 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:35:34,067 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:34,071 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:34,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:34,072 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:34,072 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:35:34,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8914e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:34,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3aa9354f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:34,166 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52b07bdb{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/java.io.tmpdir/jetty-localhost-43125-hadoop-hdfs-3_4_1-tests_jar-_-any-12333905061803510311/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:34,167 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@70121b28{HTTP/1.1, (http/1.1)}{localhost:43125} 2024-11-22T13:35:34,167 INFO [Time-limited test {}] server.Server(415): Started @103346ms 2024-11-22T13:35:34,168 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:35:34,201 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:34,205 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:34,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:34,208 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:34,208 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:35:34,208 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b915b67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:34,209 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bc8c098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:34,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f5c23ef{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/java.io.tmpdir/jetty-localhost-43127-hadoop-hdfs-3_4_1-tests_jar-_-any-534266704867911889/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:34,304 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@716c7b87{HTTP/1.1, (http/1.1)}{localhost:43127} 2024-11-22T13:35:34,304 INFO [Time-limited test {}] server.Server(415): Started @103483ms 2024-11-22T13:35:34,305 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:35:35,274 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/data/data1/current/BP-1539379056-172.17.0.2-1732282533293/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:35,274 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/data/data2/current/BP-1539379056-172.17.0.2-1732282533293/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:35,296 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:35:35,298 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d0cbe583c9daf07 with lease ID 0xf775977ba3145b54: Processing first storage report for DS-4cdb386d-3f4b-48ac-99c1-04a6e913d8f1 from datanode DatanodeRegistration(127.0.0.1:38645, datanodeUuid=12c1ec67-6b4b-4b40-89ea-1c76a793b3a1, infoPort=44677, infoSecurePort=0, ipcPort=41675, storageInfo=lv=-57;cid=testClusterID;nsid=932006477;c=1732282533293) 2024-11-22T13:35:35,298 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d0cbe583c9daf07 with lease ID 0xf775977ba3145b54: from storage DS-4cdb386d-3f4b-48ac-99c1-04a6e913d8f1 node DatanodeRegistration(127.0.0.1:38645, datanodeUuid=12c1ec67-6b4b-4b40-89ea-1c76a793b3a1, infoPort=44677, infoSecurePort=0, ipcPort=41675, storageInfo=lv=-57;cid=testClusterID;nsid=932006477;c=1732282533293), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:35,299 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2d0cbe583c9daf07 with lease ID 0xf775977ba3145b54: Processing first storage report for DS-28e7620e-f9d8-4d33-9990-4c22821861d3 from datanode DatanodeRegistration(127.0.0.1:38645, datanodeUuid=12c1ec67-6b4b-4b40-89ea-1c76a793b3a1, infoPort=44677, infoSecurePort=0, ipcPort=41675, storageInfo=lv=-57;cid=testClusterID;nsid=932006477;c=1732282533293) 2024-11-22T13:35:35,299 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2d0cbe583c9daf07 with lease ID 0xf775977ba3145b54: from storage DS-28e7620e-f9d8-4d33-9990-4c22821861d3 node DatanodeRegistration(127.0.0.1:38645, datanodeUuid=12c1ec67-6b4b-4b40-89ea-1c76a793b3a1, infoPort=44677, infoSecurePort=0, ipcPort=41675, storageInfo=lv=-57;cid=testClusterID;nsid=932006477;c=1732282533293), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:35,414 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/data/data3/current/BP-1539379056-172.17.0.2-1732282533293/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:35,415 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/data/data4/current/BP-1539379056-172.17.0.2-1732282533293/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:35,433 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:35:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67c32750b5626ea0 with lease ID 0xf775977ba3145b55: Processing first storage report for DS-0a1a36d8-d67b-4cc6-8ae0-fad6a89a557b from datanode DatanodeRegistration(127.0.0.1:42413, datanodeUuid=75735514-ca04-4497-878f-2cd8a886e920, infoPort=36601, infoSecurePort=0, ipcPort=35133, storageInfo=lv=-57;cid=testClusterID;nsid=932006477;c=1732282533293) 2024-11-22T13:35:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67c32750b5626ea0 with lease ID 0xf775977ba3145b55: from storage DS-0a1a36d8-d67b-4cc6-8ae0-fad6a89a557b node DatanodeRegistration(127.0.0.1:42413, datanodeUuid=75735514-ca04-4497-878f-2cd8a886e920, infoPort=36601, infoSecurePort=0, ipcPort=35133, storageInfo=lv=-57;cid=testClusterID;nsid=932006477;c=1732282533293), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67c32750b5626ea0 with lease ID 0xf775977ba3145b55: Processing first storage report for DS-1aaddb40-8936-4ceb-8555-5122ed90c091 from datanode DatanodeRegistration(127.0.0.1:42413, datanodeUuid=75735514-ca04-4497-878f-2cd8a886e920, infoPort=36601, infoSecurePort=0, ipcPort=35133, storageInfo=lv=-57;cid=testClusterID;nsid=932006477;c=1732282533293) 2024-11-22T13:35:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67c32750b5626ea0 with lease ID 0xf775977ba3145b55: from storage DS-1aaddb40-8936-4ceb-8555-5122ed90c091 node DatanodeRegistration(127.0.0.1:42413, datanodeUuid=75735514-ca04-4497-878f-2cd8a886e920, infoPort=36601, infoSecurePort=0, ipcPort=35133, storageInfo=lv=-57;cid=testClusterID;nsid=932006477;c=1732282533293), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T13:35:35,449 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede 2024-11-22T13:35:35,451 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/zookeeper_0, clientPort=64214, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T13:35:35,452 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64214 2024-11-22T13:35:35,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:35,454 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:35,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:35:35,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:35:35,466 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf with version=8 2024-11-22T13:35:35,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase-staging 2024-11-22T13:35:35,469 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:35:35,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:35,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:35,469 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:35:35,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:35,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:35:35,469 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T13:35:35,469 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:35:35,470 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45945 2024-11-22T13:35:35,472 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45945 connecting to ZooKeeper ensemble=127.0.0.1:64214 2024-11-22T13:35:35,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:459450x0, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:35:35,524 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45945-0x10162c1c9c60000 connected 2024-11-22T13:35:35,606 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:35,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:35,611 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:35:35,611 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf, hbase.cluster.distributed=false 2024-11-22T13:35:35,613 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:35:35,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45945 2024-11-22T13:35:35,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45945 2024-11-22T13:35:35,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45945 2024-11-22T13:35:35,614 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45945 2024-11-22T13:35:35,616 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45945 2024-11-22T13:35:35,632 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:35:35,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:35,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:35,633 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:35:35,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:35,633 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:35:35,633 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T13:35:35,633 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:35:35,634 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40233 2024-11-22T13:35:35,635 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40233 connecting to ZooKeeper ensemble=127.0.0.1:64214 2024-11-22T13:35:35,636 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:35,638 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:35,653 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:402330x0, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:35:35,654 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:402330x0, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:35:35,654 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40233-0x10162c1c9c60001 connected 2024-11-22T13:35:35,654 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T13:35:35,655 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T13:35:35,656 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T13:35:35,658 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:35:35,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40233 2024-11-22T13:35:35,660 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40233 2024-11-22T13:35:35,661 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40233 2024-11-22T13:35:35,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40233 2024-11-22T13:35:35,662 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40233 2024-11-22T13:35:35,675 DEBUG [M:0;e025332d312f:45945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e025332d312f:45945 2024-11-22T13:35:35,676 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e025332d312f,45945,1732282535468 2024-11-22T13:35:35,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:35:35,685 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:35:35,685 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/e025332d312f,45945,1732282535468 2024-11-22T13:35:35,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,695 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T13:35:35,695 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,696 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T13:35:35,696 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e025332d312f,45945,1732282535468 from backup master directory 2024-11-22T13:35:35,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e025332d312f,45945,1732282535468 2024-11-22T13:35:35,706 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:35:35,706 WARN [master/e025332d312f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
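The entries up to this point are HBaseTestingUtil bringing up an in-process HDFS (NameNode plus two DataNodes), a MiniZooKeeperCluster on client port 64214, and the master/regionserver RPC endpoints. A minimal sketch of the test-side call that triggers this sequence, assuming the HBaseTestingUtil API from hbase-testing-util (only the class name appears in the log; the method names below are the usual ones and are illustrative):

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        // Starts an in-process NameNode, DataNodes, MiniZooKeeperCluster,
        // one HMaster and one RegionServer, as in the log above.
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();
        try {
          System.out.println("zk client port = "
              + util.getConfiguration().get("hbase.zookeeper.property.clientPort"));
        } finally {
          util.shutdownMiniCluster();  // tears everything down again
        }
      }
    }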
2024-11-22T13:35:35,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:35:35,706 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e025332d312f,45945,1732282535468 2024-11-22T13:35:35,711 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/hbase.id] with ID: 4ce983b5-530f-49fb-bee6-dfb4caeb44e9 2024-11-22T13:35:35,711 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/.tmp/hbase.id 2024-11-22T13:35:35,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:35:35,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:35:35,718 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/.tmp/hbase.id]:[hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/hbase.id] 2024-11-22T13:35:35,732 INFO [master/e025332d312f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:35,732 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T13:35:35,734 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
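The cluster ID above is first written to .tmp/hbase.id and then moved onto hbase.id, so readers never observe a partially written file. A sketch of that write-then-rename pattern with the plain Hadoop FileSystem API (the helper name and paths here are illustrative, not HBase's FSUtils code):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AtomicPublishSketch {
      // Illustrative only: write to <dir>/.tmp/<name>, then rename into place.
      static void writeThenRename(FileSystem fs, Path dir, String name, String content)
          throws IOException {
        Path tmp = new Path(new Path(dir, ".tmp"), name);
        Path dst = new Path(dir, name);
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(content.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename " + tmp + " -> " + dst + " failed");
        }
      }
    }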
2024-11-22T13:35:35,748 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:35:35,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:35:35,756 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:35:35,757 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T13:35:35,757 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:35:35,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:35:35,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:35:35,767 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store 2024-11-22T13:35:35,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:35:35,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:35:35,777 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:35,777 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:35:35,777 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:35,777 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:35,777 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:35:35,777 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:35,777 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
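The descriptor printed above declares the info family with VERSIONS => '3', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL' and an 8 KB block size. A hedged sketch of declaring an equivalent family through the public client builder API (the table name is made up for illustration; this is not the master's internal code path):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the 'info' column family attributes printed in the log above.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_store"))   // illustrative name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .build())
            .build();
        System.out.println(desc);
      }
    }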
2024-11-22T13:35:35,777 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282535777Disabling compacts and flushes for region at 1732282535777Disabling writes for close at 1732282535777Writing region close event to WAL at 1732282535777Closed at 1732282535777 2024-11-22T13:35:35,779 WARN [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/.initializing 2024-11-22T13:35:35,779 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/WALs/e025332d312f,45945,1732282535468 2024-11-22T13:35:35,782 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C45945%2C1732282535468, suffix=, logDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/WALs/e025332d312f,45945,1732282535468, archiveDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/oldWALs, maxLogs=10 2024-11-22T13:35:35,782 INFO [master/e025332d312f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45945%2C1732282535468.1732282535782 2024-11-22T13:35:35,788 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/WALs/e025332d312f,45945,1732282535468/e025332d312f%2C45945%2C1732282535468.1732282535782 2024-11-22T13:35:35,792 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36601:36601),(127.0.0.1/127.0.0.1:44677:44677)] 2024-11-22T13:35:35,796 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:35:35,796 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:35,796 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,797 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T13:35:35,800 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:35,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:35,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T13:35:35,802 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:35,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:35:35,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,805 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T13:35:35,805 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:35,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:35:35,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,807 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T13:35:35,807 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:35,808 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:35:35,808 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,809 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,809 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,810 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,810 DEBUG [master/e025332d312f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,811 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T13:35:35,812 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:35,815 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:35:35,815 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801828, jitterRate=0.019577771425247192}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T13:35:35,817 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732282535797Initializing all the Stores at 1732282535798 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282535798Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282535798Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282535798Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282535798Cleaning up temporary data from old regions at 1732282535810 (+12 ms)Region opened successfully at 1732282535817 (+7 ms) 2024-11-22T13:35:35,817 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T13:35:35,821 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7222511f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:35:35,822 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T13:35:35,823 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T13:35:35,823 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T13:35:35,823 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T13:35:35,824 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T13:35:35,824 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T13:35:35,824 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T13:35:35,827 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T13:35:35,829 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T13:35:35,837 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T13:35:35,838 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T13:35:35,839 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T13:35:35,848 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T13:35:35,849 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T13:35:35,850 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T13:35:35,859 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T13:35:35,860 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T13:35:35,869 DEBUG 
[master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T13:35:35,872 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T13:35:35,884 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T13:35:35,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:35:35,895 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:35:35,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,895 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,896 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e025332d312f,45945,1732282535468, sessionid=0x10162c1c9c60000, setting cluster-up flag (Was=false) 2024-11-22T13:35:35,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,916 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,948 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T13:35:35,951 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,45945,1732282535468 2024-11-22T13:35:35,975 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:35,975 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:36,011 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T13:35:36,012 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,45945,1732282535468 2024-11-22T13:35:36,014 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T13:35:36,016 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T13:35:36,016 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T13:35:36,016 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T13:35:36,016 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e025332d312f,45945,1732282535468 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T13:35:36,018 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:35:36,018 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:35:36,018 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:35:36,018 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:35:36,019 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e025332d312f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T13:35:36,019 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,019 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:35:36,019 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e025332d312f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T13:35:36,022 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:35:36,022 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T13:35:36,022 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732282566022 2024-11-22T13:35:36,023 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T13:35:36,023 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T13:35:36,023 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T13:35:36,023 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T13:35:36,023 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T13:35:36,023 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T13:35:36,023 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T13:35:36,023 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T13:35:36,024 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T13:35:36,024 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T13:35:36,024 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,024 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T13:35:36,024 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T13:35:36,024 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T13:35:36,024 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282536024,5,FailOnTimeoutGroup] 2024-11-22T13:35:36,024 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282536024,5,FailOnTimeoutGroup] 2024-11-22T13:35:36,024 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,025 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T13:35:36,025 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,025 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:35:36,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:35:36,035 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T13:35:36,035 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf 2024-11-22T13:35:36,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:35:36,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:35:36,043 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:36,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:35:36,047 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:35:36,047 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:36,047 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:35:36,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:35:36,049 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:36,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:35:36,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:35:36,052 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:36,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:35:36,054 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:35:36,054 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:36,054 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:35:36,055 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740 2024-11-22T13:35:36,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740 2024-11-22T13:35:36,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:35:36,057 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:35:36,058 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:35:36,059 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:35:36,062 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:35:36,062 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717513, jitterRate=-0.08763526380062103}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:35:36,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732282536043Initializing all the Stores at 1732282536044 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282536044Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282536045 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282536045Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282536045Cleaning up temporary data from old regions at 1732282536057 (+12 ms)Region opened successfully at 1732282536064 (+7 ms) 2024-11-22T13:35:36,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:35:36,064 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:35:36,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:35:36,064 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(746): ClusterId : 4ce983b5-530f-49fb-bee6-dfb4caeb44e9 2024-11-22T13:35:36,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:35:36,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:35:36,064 DEBUG [RS:0;e025332d312f:40233 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 
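A few entries above, HMaster(1741) reports that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is set above 0. A minimal sketch, assuming the property name exactly as printed in that message and a purely illustrative threshold of 3, of supplying the value through a standard HBase Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountThreshold {
    public static void main(String[] args) {
        // Loads hbase-default.xml / hbase-site.xml from the classpath, as HBase processes do.
        Configuration conf = HBaseConfiguration.create();
        // Property name copied from the master log message above; the value 3 is illustrative only.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        // A value of 0 (the default reported in the log) keeps the feature disabled.
        System.out.println("threshold = " + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}

In a real deployment the same key would normally be placed in hbase-site.xml on the master rather than set programmatically.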
2024-11-22T13:35:36,065 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:35:36,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282536064Disabling compacts and flushes for region at 1732282536064Disabling writes for close at 1732282536064Writing region close event to WAL at 1732282536065 (+1 ms)Closed at 1732282536065 2024-11-22T13:35:36,067 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:35:36,067 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T13:35:36,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T13:35:36,069 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:35:36,070 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T13:35:36,075 DEBUG [RS:0;e025332d312f:40233 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T13:35:36,075 DEBUG [RS:0;e025332d312f:40233 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T13:35:36,086 DEBUG [RS:0;e025332d312f:40233 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T13:35:36,086 DEBUG [RS:0;e025332d312f:40233 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@303355c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:35:36,098 DEBUG [RS:0;e025332d312f:40233 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e025332d312f:40233 2024-11-22T13:35:36,098 INFO [RS:0;e025332d312f:40233 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T13:35:36,098 INFO [RS:0;e025332d312f:40233 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T13:35:36,098 DEBUG [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T13:35:36,099 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(2659): reportForDuty to master=e025332d312f,45945,1732282535468 with port=40233, startcode=1732282535632 2024-11-22T13:35:36,099 DEBUG [RS:0;e025332d312f:40233 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T13:35:36,102 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40845, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T13:35:36,102 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45945 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e025332d312f,40233,1732282535632 2024-11-22T13:35:36,102 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45945 {}] master.ServerManager(517): Registering regionserver=e025332d312f,40233,1732282535632 2024-11-22T13:35:36,104 DEBUG [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf 2024-11-22T13:35:36,105 DEBUG [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36837 2024-11-22T13:35:36,105 DEBUG [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T13:35:36,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:35:36,117 DEBUG [RS:0;e025332d312f:40233 {}] zookeeper.ZKUtil(111): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e025332d312f,40233,1732282535632 2024-11-22T13:35:36,117 WARN [RS:0;e025332d312f:40233 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T13:35:36,117 INFO [RS:0;e025332d312f:40233 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:35:36,117 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e025332d312f,40233,1732282535632] 2024-11-22T13:35:36,117 DEBUG [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/WALs/e025332d312f,40233,1732282535632 2024-11-22T13:35:36,122 INFO [RS:0;e025332d312f:40233 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T13:35:36,125 INFO [RS:0;e025332d312f:40233 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T13:35:36,126 INFO [RS:0;e025332d312f:40233 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T13:35:36,126 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T13:35:36,126 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T13:35:36,127 INFO [RS:0;e025332d312f:40233 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T13:35:36,127 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,128 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,129 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,129 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:36,129 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:35:36,129 DEBUG [RS:0;e025332d312f:40233 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:35:36,129 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
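The recurring "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries (CompactionThroughputTuner, CompactedHFilesCleaner, CompactionChecker, and so on) come from HBase's ChoreService scheduling periodic background tasks. A minimal sketch of that pattern, using a hypothetical HeartbeatChore and an inline Stoppable that do not appear in this log:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
    // Hypothetical chore; the real ones in this log are CompactionChecker, LogsCleaner, etc.
    static class HeartbeatChore extends ScheduledChore {
        HeartbeatChore(Stoppable stopper) {
            super("HeartbeatChore", stopper, 1000); // name, stopper, period in milliseconds
        }

        @Override
        protected void chore() {
            System.out.println("heartbeat tick");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable so the chore can be cancelled.
        Stoppable stopper = new Stoppable() {
            private volatile boolean stopped;
            @Override public void stop(String why) { stopped = true; }
            @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("chore-sketch");
        service.scheduleChore(new HeartbeatChore(stopper));
        Thread.sleep(3000); // let a few periods elapse
        stopper.stop("done");
        service.shutdown();
    }
}

The period passed to the ScheduledChore constructor is what surfaces as period=... in the ChoreService lines above.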
2024-11-22T13:35:36,129 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,129 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,130 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,130 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,130 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,40233,1732282535632-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:35:36,145 INFO [RS:0;e025332d312f:40233 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T13:35:36,145 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,40233,1732282535632-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,145 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,145 INFO [RS:0;e025332d312f:40233 {}] regionserver.Replication(171): e025332d312f,40233,1732282535632 started 2024-11-22T13:35:36,167 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,167 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1482): Serving as e025332d312f,40233,1732282535632, RpcServer on e025332d312f/172.17.0.2:40233, sessionid=0x10162c1c9c60001 2024-11-22T13:35:36,167 DEBUG [RS:0;e025332d312f:40233 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T13:35:36,167 DEBUG [RS:0;e025332d312f:40233 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e025332d312f,40233,1732282535632 2024-11-22T13:35:36,167 DEBUG [RS:0;e025332d312f:40233 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,40233,1732282535632' 2024-11-22T13:35:36,167 DEBUG [RS:0;e025332d312f:40233 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T13:35:36,168 DEBUG [RS:0;e025332d312f:40233 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T13:35:36,169 DEBUG [RS:0;e025332d312f:40233 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T13:35:36,169 DEBUG [RS:0;e025332d312f:40233 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T13:35:36,169 DEBUG [RS:0;e025332d312f:40233 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e025332d312f,40233,1732282535632 2024-11-22T13:35:36,169 DEBUG [RS:0;e025332d312f:40233 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,40233,1732282535632' 2024-11-22T13:35:36,169 DEBUG [RS:0;e025332d312f:40233 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T13:35:36,170 DEBUG 
[RS:0;e025332d312f:40233 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T13:35:36,170 DEBUG [RS:0;e025332d312f:40233 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T13:35:36,170 INFO [RS:0;e025332d312f:40233 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T13:35:36,170 INFO [RS:0;e025332d312f:40233 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T13:35:36,220 WARN [e025332d312f:45945 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T13:35:36,274 INFO [RS:0;e025332d312f:40233 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C40233%2C1732282535632, suffix=, logDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/WALs/e025332d312f,40233,1732282535632, archiveDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/oldWALs, maxLogs=32 2024-11-22T13:35:36,279 INFO [RS:0;e025332d312f:40233 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C40233%2C1732282535632.1732282536279 2024-11-22T13:35:36,286 INFO [RS:0;e025332d312f:40233 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/WALs/e025332d312f,40233,1732282535632/e025332d312f%2C40233%2C1732282535632.1732282536279 2024-11-22T13:35:36,287 DEBUG [RS:0;e025332d312f:40233 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36601:36601),(127.0.0.1/127.0.0.1:44677:44677)] 2024-11-22T13:35:36,471 DEBUG [e025332d312f:45945 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T13:35:36,472 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e025332d312f,40233,1732282535632 2024-11-22T13:35:36,475 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,40233,1732282535632, state=OPENING 2024-11-22T13:35:36,565 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T13:35:36,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:36,575 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:36,576 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:35:36,576 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:35:36,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=e025332d312f,40233,1732282535632}] 2024-11-22T13:35:36,577 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:35:36,731 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T13:35:36,736 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50039, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T13:35:36,743 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T13:35:36,744 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:35:36,747 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C40233%2C1732282535632.meta, suffix=.meta, logDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/WALs/e025332d312f,40233,1732282535632, archiveDir=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/oldWALs, maxLogs=32 2024-11-22T13:35:36,750 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C40233%2C1732282535632.meta.1732282536749.meta 2024-11-22T13:35:36,756 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/WALs/e025332d312f,40233,1732282535632/e025332d312f%2C40233%2C1732282535632.meta.1732282536749.meta 2024-11-22T13:35:36,758 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44677:44677),(127.0.0.1/127.0.0.1:36601:36601)] 2024-11-22T13:35:36,759 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:35:36,759 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T13:35:36,760 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T13:35:36,760 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
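The hbase:meta table descriptor printed earlier in this log (column families info, ns, rep_barrier and table, with IN_MEMORY => 'true', BLOOMFILTER => 'ROWCOL', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOCKSIZE => 8192) is what the store openers below instantiate family by family. A minimal sketch, for a hypothetical user table rather than hbase:meta itself, of expressing the same kind of column-family attributes through the public descriptor builders:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static void main(String[] args) {
        // Attribute values mirror those printed for the 'info' family of hbase:meta in the log above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build();
        // 'example_table' is hypothetical; it is not a table from this test run.
        TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(info)
            .build();
        System.out.println(table);
    }
}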
2024-11-22T13:35:36,760 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T13:35:36,760 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:36,760 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T13:35:36,760 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T13:35:36,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:35:36,764 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:35:36,764 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:36,765 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:35:36,767 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:35:36,767 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:36,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:35:36,769 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:35:36,769 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:36,770 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:35:36,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:35:36,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:36,772 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T13:35:36,773 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:35:36,773 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740 2024-11-22T13:35:36,775 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740 2024-11-22T13:35:36,776 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:35:36,776 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:35:36,777 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:35:36,778 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:35:36,779 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706332, jitterRate=-0.10185259580612183}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:35:36,779 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T13:35:36,780 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732282536761Writing region info on filesystem at 1732282536761Initializing all the Stores at 1732282536762 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282536762Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282536763 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282536763Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282536763Cleaning up temporary data from old regions at 1732282536776 (+13 ms)Running coprocessor post-open hooks at 1732282536779 (+3 ms)Region opened successfully at 1732282536780 (+1 ms) 2024-11-22T13:35:36,781 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732282536730 2024-11-22T13:35:36,784 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T13:35:36,785 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T13:35:36,785 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,40233,1732282535632 2024-11-22T13:35:36,787 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,40233,1732282535632, state=OPEN 2024-11-22T13:35:36,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:35:36,830 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:35:36,830 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e025332d312f,40233,1732282535632 2024-11-22T13:35:36,830 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:35:36,830 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:35:36,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T13:35:36,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,40233,1732282535632 in 254 msec 2024-11-22T13:35:36,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T13:35:36,840 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 768 msec 2024-11-22T13:35:36,842 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:35:36,842 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T13:35:36,844 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:35:36,844 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,40233,1732282535632, seqNum=-1] 2024-11-22T13:35:36,845 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:35:36,848 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40613, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:35:36,856 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 840 msec 2024-11-22T13:35:36,856 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732282536856, completionTime=-1 2024-11-22T13:35:36,856 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T13:35:36,857 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T13:35:36,859 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T13:35:36,859 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732282596859 2024-11-22T13:35:36,859 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732282656859 2024-11-22T13:35:36,859 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T13:35:36,859 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45945,1732282535468-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,859 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45945,1732282535468-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,860 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45945,1732282535468-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,860 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e025332d312f:45945, period=300000, unit=MILLISECONDS is enabled. 
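The client-side entries in the block above ("Start fetching meta region location from registry", followed by a ClientService connection) are the normal HBase client handshake: resolve the cluster id, locate hbase:meta, then issue RPCs against the hosting region server. A minimal sketch of the same flow through the public client API, assuming the cluster's hbase-site.xml (quorum 127.0.0.1:64214 in this run) is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class MetaScanSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Creating the connection performs the registry lookups seen in the log
        // (cluster id first, then the hbase:meta region location).
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
            try (ResultScanner scanner = meta.getScanner(new Scan())) {
                for (Result row : scanner) {
                    System.out.println(row); // region rows written by InitMetaProcedure and later assignments
                }
            }
        }
    }
}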
2024-11-22T13:35:36,860 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,860 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:36,862 DEBUG [master/e025332d312f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T13:35:36,865 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.159sec 2024-11-22T13:35:36,865 INFO [master/e025332d312f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T13:35:36,865 INFO [master/e025332d312f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T13:35:36,865 INFO [master/e025332d312f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T13:35:36,865 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T13:35:36,865 INFO [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T13:35:36,865 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45945,1732282535468-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:35:36,865 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45945,1732282535468-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T13:35:36,868 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T13:35:36,868 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T13:35:36,868 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45945,1732282535468-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
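[editor's aside] The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines above are emitted when the master registers periodic chores with its ChoreService. A minimal sketch of that public API follows; the chore name, period, and Stoppable here are illustrative and not taken from this run.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // Minimal Stoppable so the chore has a lifecycle owner (illustrative only).
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        // Period is in milliseconds, matching the "period=..., unit=MILLISECONDS" log lines.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 60_000) {
          @Override protected void chore() {
            System.out.println("periodic work");
          }
        };
        // Scheduling is what produces the "... is enabled." message in the log.
        service.scheduleChore(chore);
        TimeUnit.SECONDS.sleep(1);
        service.shutdown();
      }
    }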
2024-11-22T13:35:36,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T13:35:36,965 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9379ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:35:36,965 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e025332d312f,45945,-1 for getting cluster id 2024-11-22T13:35:36,966 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T13:35:36,969 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4ce983b5-530f-49fb-bee6-dfb4caeb44e9' 2024-11-22T13:35:36,969 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T13:35:36,970 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4ce983b5-530f-49fb-bee6-dfb4caeb44e9" 2024-11-22T13:35:36,970 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e0c936b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:35:36,970 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e025332d312f,45945,-1] 2024-11-22T13:35:36,971 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T13:35:36,971 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:36,974 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54890, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T13:35:36,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3aadf114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:35:36,976 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:35:36,978 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,40233,1732282535632, seqNum=-1] 2024-11-22T13:35:36,979 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:35:36,981 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35508, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:35:36,983 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster 
is up; activeMaster=e025332d312f,45945,1732282535468 2024-11-22T13:35:36,984 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:36,987 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T13:35:36,987 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T13:35:36,987 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T13:35:36,987 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:35:36,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:36,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:36,988 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T13:35:36,988 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T13:35:36,988 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=355378974, stopped=false 2024-11-22T13:35:36,988 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e025332d312f,45945,1732282535468 2024-11-22T13:35:37,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:35:37,011 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:35:37,011 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:35:37,011 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:37,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:37,011 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
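[editor's aside] The tearDown stack traces above end in HBaseTestingUtil.shutdownMiniCluster(), and a fresh cluster is later started with the StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, ...} logged near the end of this section. A hedged sketch of that test lifecycle follows; the class and method names are placeholders, not the actual AbstractTestLogRolling/TestLogRolling code.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Mirrors the option string logged when the next minicluster is brought up.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build();
        testUtil.startMiniCluster(option);
      }

      @After
      public void tearDown() throws Exception {
        // Same call chain as the stack traces above:
        // shutdownMiniCluster -> shutdownMiniHBaseCluster -> cleanup/closeConnection.
        testUtil.shutdownMiniCluster();
      }

      @Test
      public void smoke() throws Exception {
        // A real log-rolling test would create a table and write data here.
      }
    }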
2024-11-22T13:35:37,011 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:35:37,011 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:37,012 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:35:37,012 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e025332d312f,40233,1732282535632' ***** 2024-11-22T13:35:37,012 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T13:35:37,012 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:35:37,012 INFO [RS:0;e025332d312f:40233 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T13:35:37,012 INFO [RS:0;e025332d312f:40233 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T13:35:37,012 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T13:35:37,012 INFO [RS:0;e025332d312f:40233 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T13:35:37,012 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(959): stopping server e025332d312f,40233,1732282535632 2024-11-22T13:35:37,012 INFO [RS:0;e025332d312f:40233 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:35:37,012 INFO [RS:0;e025332d312f:40233 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e025332d312f:40233. 2024-11-22T13:35:37,013 DEBUG [RS:0;e025332d312f:40233 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:35:37,013 DEBUG [RS:0;e025332d312f:40233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:37,013 INFO [RS:0;e025332d312f:40233 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-22T13:35:37,013 INFO [RS:0;e025332d312f:40233 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T13:35:37,013 INFO [RS:0;e025332d312f:40233 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T13:35:37,013 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T13:35:37,013 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T13:35:37,013 DEBUG [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T13:35:37,013 DEBUG [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T13:35:37,013 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:35:37,013 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:35:37,014 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:35:37,014 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:35:37,014 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:35:37,014 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T13:35:37,032 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740/.tmp/ns/35a29f3c7b154a658ad38d106591946f is 43, key is default/ns:d/1732282536849/Put/seqid=0 2024-11-22T13:35:37,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741835_1011 (size=5153) 2024-11-22T13:35:37,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741835_1011 (size=5153) 2024-11-22T13:35:37,039 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740/.tmp/ns/35a29f3c7b154a658ad38d106591946f 2024-11-22T13:35:37,050 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740/.tmp/ns/35a29f3c7b154a658ad38d106591946f as hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740/ns/35a29f3c7b154a658ad38d106591946f 2024-11-22T13:35:37,058 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740/ns/35a29f3c7b154a658ad38d106591946f, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T13:35:37,060 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false 2024-11-22T13:35:37,060 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T13:35:37,067 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T13:35:37,068 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:35:37,068 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:35:37,068 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282537013Running coprocessor pre-close hooks at 1732282537013Disabling compacts and flushes for region at 1732282537013Disabling writes for close at 1732282537014 (+1 ms)Obtaining lock to block concurrent updates at 1732282537014Preparing flush snapshotting stores in 1588230740 at 1732282537014Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732282537014Flushing stores of hbase:meta,,1.1588230740 at 1732282537015 (+1 ms)Flushing 1588230740/ns: creating writer at 1732282537015Flushing 1588230740/ns: appending metadata at 1732282537031 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732282537031Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@619bd72: reopening flushed file at 1732282537048 (+17 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false at 1732282537060 (+12 ms)Writing region close event to WAL at 1732282537062 (+2 ms)Running coprocessor post-close hooks at 1732282537068 (+6 ms)Closed at 1732282537068 2024-11-22T13:35:37,068 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T13:35:37,199 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T13:35:37,199 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T13:35:37,214 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(976): stopping server e025332d312f,40233,1732282535632; all regions closed. 
2024-11-22T13:35:37,214 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,214 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,214 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,215 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,215 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741834_1010 (size=1152) 2024-11-22T13:35:37,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741834_1010 (size=1152) 2024-11-22T13:35:37,219 DEBUG [RS:0;e025332d312f:40233 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/oldWALs 2024-11-22T13:35:37,219 INFO [RS:0;e025332d312f:40233 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C40233%2C1732282535632.meta:.meta(num 1732282536749) 2024-11-22T13:35:37,220 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,220 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,220 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,220 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,220 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741833_1009 (size=93) 2024-11-22T13:35:37,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741833_1009 (size=93) 2024-11-22T13:35:37,225 DEBUG [RS:0;e025332d312f:40233 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/oldWALs 2024-11-22T13:35:37,225 INFO [RS:0;e025332d312f:40233 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C40233%2C1732282535632:(num 1732282536279) 2024-11-22T13:35:37,225 DEBUG [RS:0;e025332d312f:40233 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:37,225 INFO [RS:0;e025332d312f:40233 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:35:37,225 INFO [RS:0;e025332d312f:40233 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:35:37,226 INFO [RS:0;e025332d312f:40233 {}] hbase.ChoreService(370): Chore service for: regionserver/e025332d312f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T13:35:37,226 INFO [RS:0;e025332d312f:40233 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:35:37,226 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
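[editor's aside] The two "Moved 1 WAL file(s) to .../oldWALs" entries above archive the region server's WALs before it exits. As an illustrative aside (not something this test does), the archived files could be listed with the plain Hadoop FileSystem API; the NameNode URI and the oldWALs path below are copied from the log lines in this section.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListOldWals {
      public static void main(String[] args) throws Exception {
        // NameNode address and oldWALs directory as they appear in the surrounding log.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36837"), new Configuration());
        Path oldWals = new Path("/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/oldWALs");
        for (FileStatus stat : fs.listStatus(oldWals)) {
          System.out.println(stat.getPath() + " " + stat.getLen() + " bytes");
        }
      }
    }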
2024-11-22T13:35:37,226 INFO [RS:0;e025332d312f:40233 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40233 2024-11-22T13:35:37,238 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e025332d312f,40233,1732282535632 2024-11-22T13:35:37,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:35:37,238 INFO [RS:0;e025332d312f:40233 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:35:37,238 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e025332d312f,40233,1732282535632] 2024-11-22T13:35:37,258 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e025332d312f,40233,1732282535632 already deleted, retry=false 2024-11-22T13:35:37,259 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e025332d312f,40233,1732282535632 expired; onlineServers=0 2024-11-22T13:35:37,259 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e025332d312f,45945,1732282535468' ***** 2024-11-22T13:35:37,259 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T13:35:37,259 INFO [M:0;e025332d312f:45945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:35:37,259 INFO [M:0;e025332d312f:45945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:35:37,259 DEBUG [M:0;e025332d312f:45945 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T13:35:37,259 DEBUG [M:0;e025332d312f:45945 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T13:35:37,259 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T13:35:37,259 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282536024 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282536024,5,FailOnTimeoutGroup] 2024-11-22T13:35:37,259 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282536024 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282536024,5,FailOnTimeoutGroup] 2024-11-22T13:35:37,259 INFO [M:0;e025332d312f:45945 {}] hbase.ChoreService(370): Chore service for: master/e025332d312f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T13:35:37,260 INFO [M:0;e025332d312f:45945 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:35:37,260 DEBUG [M:0;e025332d312f:45945 {}] master.HMaster(1795): Stopping service threads 2024-11-22T13:35:37,260 INFO [M:0;e025332d312f:45945 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T13:35:37,260 INFO [M:0;e025332d312f:45945 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:35:37,260 INFO [M:0;e025332d312f:45945 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T13:35:37,260 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T13:35:37,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T13:35:37,269 DEBUG [M:0;e025332d312f:45945 {}] zookeeper.ZKUtil(347): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T13:35:37,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:37,269 WARN [M:0;e025332d312f:45945 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T13:35:37,270 INFO [M:0;e025332d312f:45945 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/.lastflushedseqids 2024-11-22T13:35:37,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741836_1012 (size=99) 2024-11-22T13:35:37,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741836_1012 (size=99) 2024-11-22T13:35:37,277 INFO [M:0;e025332d312f:45945 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T13:35:37,277 INFO [M:0;e025332d312f:45945 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T13:35:37,277 DEBUG [M:0;e025332d312f:45945 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:35:37,277 INFO [M:0;e025332d312f:45945 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:37,277 DEBUG [M:0;e025332d312f:45945 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:37,277 DEBUG [M:0;e025332d312f:45945 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:35:37,277 DEBUG [M:0;e025332d312f:45945 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:37,277 INFO [M:0;e025332d312f:45945 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T13:35:37,297 DEBUG [M:0;e025332d312f:45945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c652a915be4549bc86692f7f6f6888d2 is 82, key is hbase:meta,,1/info:regioninfo/1732282536785/Put/seqid=0 2024-11-22T13:35:37,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741837_1013 (size=5672) 2024-11-22T13:35:37,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741837_1013 (size=5672) 2024-11-22T13:35:37,304 INFO [M:0;e025332d312f:45945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c652a915be4549bc86692f7f6f6888d2 2024-11-22T13:35:37,326 DEBUG [M:0;e025332d312f:45945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6513c8fdbc2c4a79bc9597d204b9d318 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732282536855/Put/seqid=0 2024-11-22T13:35:37,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741838_1014 (size=5275) 2024-11-22T13:35:37,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741838_1014 (size=5275) 2024-11-22T13:35:37,333 INFO [M:0;e025332d312f:45945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6513c8fdbc2c4a79bc9597d204b9d318 2024-11-22T13:35:37,348 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:35:37,348 INFO [RS:0;e025332d312f:40233 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:35:37,348 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40233-0x10162c1c9c60001, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-22T13:35:37,348 INFO [RS:0;e025332d312f:40233 {}] regionserver.HRegionServer(1031): Exiting; stopping=e025332d312f,40233,1732282535632; zookeeper connection closed. 2024-11-22T13:35:37,349 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1790e0a4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1790e0a4 2024-11-22T13:35:37,349 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T13:35:37,362 DEBUG [M:0;e025332d312f:45945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c631c77f362a4b8e94b2fcba98dfc500 is 69, key is e025332d312f,40233,1732282535632/rs:state/1732282536103/Put/seqid=0 2024-11-22T13:35:37,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741839_1015 (size=5156) 2024-11-22T13:35:37,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741839_1015 (size=5156) 2024-11-22T13:35:37,370 INFO [M:0;e025332d312f:45945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c631c77f362a4b8e94b2fcba98dfc500 2024-11-22T13:35:37,397 DEBUG [M:0;e025332d312f:45945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/59eddfaf3a6a461d9c42b0c89375a12f is 52, key is load_balancer_on/state:d/1732282536986/Put/seqid=0 2024-11-22T13:35:37,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741840_1016 (size=5056) 2024-11-22T13:35:37,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741840_1016 (size=5056) 2024-11-22T13:35:37,404 INFO [M:0;e025332d312f:45945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/59eddfaf3a6a461d9c42b0c89375a12f 2024-11-22T13:35:37,411 DEBUG [M:0;e025332d312f:45945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c652a915be4549bc86692f7f6f6888d2 as hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c652a915be4549bc86692f7f6f6888d2 2024-11-22T13:35:37,418 INFO [M:0;e025332d312f:45945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c652a915be4549bc86692f7f6f6888d2, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T13:35:37,419 DEBUG 
[M:0;e025332d312f:45945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6513c8fdbc2c4a79bc9597d204b9d318 as hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6513c8fdbc2c4a79bc9597d204b9d318 2024-11-22T13:35:37,426 INFO [M:0;e025332d312f:45945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6513c8fdbc2c4a79bc9597d204b9d318, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T13:35:37,428 DEBUG [M:0;e025332d312f:45945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/c631c77f362a4b8e94b2fcba98dfc500 as hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c631c77f362a4b8e94b2fcba98dfc500 2024-11-22T13:35:37,435 INFO [M:0;e025332d312f:45945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/c631c77f362a4b8e94b2fcba98dfc500, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T13:35:37,437 DEBUG [M:0;e025332d312f:45945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/59eddfaf3a6a461d9c42b0c89375a12f as hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/59eddfaf3a6a461d9c42b0c89375a12f 2024-11-22T13:35:37,444 INFO [M:0;e025332d312f:45945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36837/user/jenkins/test-data/b92a1c5c-2a50-575b-4f28-462d2fa6cfbf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/59eddfaf3a6a461d9c42b0c89375a12f, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T13:35:37,445 INFO [M:0;e025332d312f:45945 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 168ms, sequenceid=29, compaction requested=false 2024-11-22T13:35:37,448 INFO [M:0;e025332d312f:45945 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:37,448 DEBUG [M:0;e025332d312f:45945 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282537277Disabling compacts and flushes for region at 1732282537277Disabling writes for close at 1732282537277Obtaining lock to block concurrent updates at 1732282537277Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732282537277Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732282537278 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732282537279 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732282537279Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732282537296 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732282537296Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732282537310 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732282537326 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732282537326Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732282537341 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732282537362 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732282537362Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732282537377 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732282537397 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732282537397Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15c9b26: reopening flushed file at 1732282537410 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27fa891b: reopening flushed file at 1732282537418 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f3e998f: reopening flushed file at 1732282537427 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c72c16d: reopening flushed file at 1732282537436 (+9 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 168ms, sequenceid=29, compaction requested=false at 1732282537445 (+9 ms)Writing region close event to WAL at 1732282537447 (+2 ms)Closed at 1732282537448 (+1 ms) 2024-11-22T13:35:37,448 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,448 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,449 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,449 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,449 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:35:37,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38645 is added to blk_1073741830_1006 (size=10311) 2024-11-22T13:35:37,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42413 is added to blk_1073741830_1006 (size=10311) 2024-11-22T13:35:37,452 INFO [M:0;e025332d312f:45945 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T13:35:37,452 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T13:35:37,452 INFO [M:0;e025332d312f:45945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45945 2024-11-22T13:35:37,453 INFO [M:0;e025332d312f:45945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:35:37,564 INFO [M:0;e025332d312f:45945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:35:37,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:35:37,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45945-0x10162c1c9c60000, quorum=127.0.0.1:64214, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:35:37,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f5c23ef{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:37,567 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@716c7b87{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:35:37,567 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:35:37,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bc8c098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:35:37,567 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b915b67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.log.dir/,STOPPED} 2024-11-22T13:35:37,569 WARN [BP-1539379056-172.17.0.2-1732282533293 heartbeating to localhost/127.0.0.1:36837 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:35:37,569 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:35:37,569 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:35:37,569 WARN [BP-1539379056-172.17.0.2-1732282533293 heartbeating to localhost/127.0.0.1:36837 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1539379056-172.17.0.2-1732282533293 (Datanode Uuid 75735514-ca04-4497-878f-2cd8a886e920) service to localhost/127.0.0.1:36837 2024-11-22T13:35:37,569 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/data/data3/current/BP-1539379056-172.17.0.2-1732282533293 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:37,570 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/data/data4/current/BP-1539379056-172.17.0.2-1732282533293 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:37,570 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:35:37,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52b07bdb{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:37,572 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70121b28{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:35:37,572 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:35:37,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3aa9354f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:35:37,572 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8914e7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.log.dir/,STOPPED} 2024-11-22T13:35:37,574 WARN [BP-1539379056-172.17.0.2-1732282533293 heartbeating to localhost/127.0.0.1:36837 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:35:37,574 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:35:37,574 WARN [BP-1539379056-172.17.0.2-1732282533293 heartbeating to localhost/127.0.0.1:36837 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1539379056-172.17.0.2-1732282533293 (Datanode Uuid 12c1ec67-6b4b-4b40-89ea-1c76a793b3a1) service to localhost/127.0.0.1:36837 2024-11-22T13:35:37,574 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:35:37,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/data/data1/current/BP-1539379056-172.17.0.2-1732282533293 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:37,574 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/cluster_4bacf7a3-1dcf-7cdd-b9d9-2375a1e24f7c/data/data2/current/BP-1539379056-172.17.0.2-1732282533293 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:37,575 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:35:37,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@493d1d34{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:35:37,582 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a249094{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:35:37,582 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:35:37,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75cbfab9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:35:37,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65506a11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.log.dir/,STOPPED} 2024-11-22T13:35:37,589 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T13:35:37,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T13:35:37,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T13:35:37,605 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.log.dir so I do NOT create it in target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c 2024-11-22T13:35:37,605 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/4b100bf1-58c1-913b-ca7e-e1ef51af7ede/hadoop.tmp.dir so I do NOT create it in target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c 2024-11-22T13:35:37,606 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0, deleteOnExit=true 2024-11-22T13:35:37,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T13:35:37,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/test.cache.data in system properties and HBase conf 2024-11-22T13:35:37,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T13:35:37,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir in system properties and HBase conf 2024-11-22T13:35:37,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T13:35:37,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T13:35:37,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T13:35:37,606 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T13:35:37,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:35:37,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:35:37,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T13:35:37,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:35:37,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T13:35:37,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T13:35:37,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:35:37,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:35:37,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T13:35:37,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/nfs.dump.dir in system properties and HBase conf 2024-11-22T13:35:37,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/java.io.tmpdir in system properties and HBase conf 2024-11-22T13:35:37,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:35:37,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T13:35:37,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T13:35:37,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:37,622 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:35:37,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:37,945 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-22T13:35:37,951 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:37,960 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:37,962 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:37,962 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:37,984 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:37,990 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:37,991 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:37,991 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:37,991 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:35:37,992 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:37,992 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b918d2a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:37,993 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20aa2ea7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:38,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e195dbd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/java.io.tmpdir/jetty-localhost-37377-hadoop-hdfs-3_4_1-tests_jar-_-any-5321141746542402065/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:35:38,087 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d9b1613{HTTP/1.1, (http/1.1)}{localhost:37377} 2024-11-22T13:35:38,087 INFO [Time-limited test {}] server.Server(415): Started @107266ms 2024-11-22T13:35:38,099 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:35:38,130 INFO [regionserver/e025332d312f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:35:38,342 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:38,345 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:38,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:38,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:38,346 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:35:38,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@651aa118{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:38,347 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48d478e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:38,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3e10767c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/java.io.tmpdir/jetty-localhost-35041-hadoop-hdfs-3_4_1-tests_jar-_-any-6506308234675355560/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:38,440 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d2d9832{HTTP/1.1, (http/1.1)}{localhost:35041} 2024-11-22T13:35:38,440 INFO [Time-limited test {}] server.Server(415): Started @107619ms 2024-11-22T13:35:38,441 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:35:38,469 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:38,473 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:38,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:38,474 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:38,474 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:35:38,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d5daa57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:38,475 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10a4d310{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:38,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@544fa662{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/java.io.tmpdir/jetty-localhost-39255-hadoop-hdfs-3_4_1-tests_jar-_-any-15636135016352378799/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:38,570 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3574ce3f{HTTP/1.1, (http/1.1)}{localhost:39255} 2024-11-22T13:35:38,570 INFO [Time-limited test {}] server.Server(415): Started @107749ms 2024-11-22T13:35:38,571 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:35:39,683 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data1/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:39,684 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data2/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:39,707 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:35:39,710 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a568486434480fb with lease ID 0xfb4f9c7da85b9fca: Processing first storage report for DS-b45bda0f-803e-40b5-b549-7b8f9276a514 from datanode DatanodeRegistration(127.0.0.1:45457, datanodeUuid=2fab4074-db1e-4f7c-946e-7ff502acb41d, infoPort=33233, infoSecurePort=0, ipcPort=38707, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:39,710 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a568486434480fb with lease ID 0xfb4f9c7da85b9fca: from storage DS-b45bda0f-803e-40b5-b549-7b8f9276a514 node DatanodeRegistration(127.0.0.1:45457, datanodeUuid=2fab4074-db1e-4f7c-946e-7ff502acb41d, infoPort=33233, infoSecurePort=0, ipcPort=38707, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:39,710 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a568486434480fb with lease ID 0xfb4f9c7da85b9fca: Processing first storage report for DS-652065fd-ec16-4499-9358-30c74849afa6 from datanode DatanodeRegistration(127.0.0.1:45457, datanodeUuid=2fab4074-db1e-4f7c-946e-7ff502acb41d, infoPort=33233, infoSecurePort=0, ipcPort=38707, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:39,710 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a568486434480fb with lease ID 0xfb4f9c7da85b9fca: from storage DS-652065fd-ec16-4499-9358-30c74849afa6 node DatanodeRegistration(127.0.0.1:45457, datanodeUuid=2fab4074-db1e-4f7c-946e-7ff502acb41d, infoPort=33233, infoSecurePort=0, ipcPort=38707, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:39,823 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data4/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:39,823 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data3/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:39,839 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:35:39,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f98861c0df42e1a with lease ID 0xfb4f9c7da85b9fcb: Processing first storage report for DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f from datanode DatanodeRegistration(127.0.0.1:43315, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=38501, infoSecurePort=0, ipcPort=41413, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:39,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f98861c0df42e1a with lease ID 0xfb4f9c7da85b9fcb: from storage DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f node DatanodeRegistration(127.0.0.1:43315, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=38501, infoSecurePort=0, ipcPort=41413, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:39,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7f98861c0df42e1a with lease ID 0xfb4f9c7da85b9fcb: Processing first storage report for DS-0803e8e7-14c4-4860-865f-3228e2024d23 from datanode DatanodeRegistration(127.0.0.1:43315, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=38501, infoSecurePort=0, ipcPort=41413, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:39,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7f98861c0df42e1a with lease ID 0xfb4f9c7da85b9fcb: from storage DS-0803e8e7-14c4-4860-865f-3228e2024d23 node DatanodeRegistration(127.0.0.1:43315, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=38501, infoSecurePort=0, ipcPort=41413, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:39,911 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c 2024-11-22T13:35:39,915 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/zookeeper_0, clientPort=62897, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T13:35:39,917 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62897 2024-11-22T13:35:39,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:39,920 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:39,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:35:39,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:35:39,935 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7 with version=8 2024-11-22T13:35:39,935 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase-staging 2024-11-22T13:35:39,938 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:35:39,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:39,938 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:39,938 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:35:39,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:39,939 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:35:39,939 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T13:35:39,939 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:35:39,940 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35487 2024-11-22T13:35:39,942 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35487 connecting to ZooKeeper ensemble=127.0.0.1:62897 2024-11-22T13:35:39,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:354870x0, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:35:39,997 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35487-0x10162c1db350000 connected 2024-11-22T13:35:40,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:40,088 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:40,092 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:35:40,093 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7, hbase.cluster.distributed=false 2024-11-22T13:35:40,096 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:35:40,096 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35487 2024-11-22T13:35:40,097 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35487 2024-11-22T13:35:40,097 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35487 2024-11-22T13:35:40,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35487 2024-11-22T13:35:40,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35487 2024-11-22T13:35:40,116 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:35:40,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:40,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:40,116 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:35:40,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:40,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:35:40,116 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T13:35:40,116 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:35:40,117 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41959 2024-11-22T13:35:40,118 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41959 connecting to ZooKeeper ensemble=127.0.0.1:62897 2024-11-22T13:35:40,119 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:40,120 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:40,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419590x0, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:35:40,132 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41959-0x10162c1db350001 connected 2024-11-22T13:35:40,133 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:35:40,133 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T13:35:40,133 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T13:35:40,134 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T13:35:40,135 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:35:40,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41959 2024-11-22T13:35:40,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41959 2024-11-22T13:35:40,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41959 2024-11-22T13:35:40,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41959 2024-11-22T13:35:40,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41959 2024-11-22T13:35:40,151 DEBUG [M:0;e025332d312f:35487 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e025332d312f:35487 2024-11-22T13:35:40,152 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e025332d312f,35487,1732282539938 2024-11-22T13:35:40,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:35:40,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:35:40,164 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e025332d312f,35487,1732282539938 2024-11-22T13:35:40,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T13:35:40,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,175 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T13:35:40,176 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e025332d312f,35487,1732282539938 from backup master directory 2024-11-22T13:35:40,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e025332d312f,35487,1732282539938 2024-11-22T13:35:40,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:35:40,185 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:35:40,185 WARN [master/e025332d312f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
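
The ZKUtil/ZKWatcher entries around here set watches on znodes that may not exist yet (/hbase/running, /hbase/master, /hbase/acl) and then react to NodeCreated, NodeDeleted and NodeChildrenChanged events. Underneath the HBase wrappers this is the standard ZooKeeper exists-with-watch pattern; a stripped-down sketch against the stock ZooKeeper client, where the quorum string and znode path are copied from the log and everything else is illustrative:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZNodeWatch {
      public static void main(String[] args) throws Exception {
        // Quorum as printed by ZKWatcher in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62897", 30000, event -> { });

        Watcher masterWatcher = (WatchedEvent event) -> {
          // NodeCreated fires when an active master registers /hbase/master;
          // NodeDeleted fires when it goes away and a backup can take over.
          System.out.println("event " + event.getType() + " on " + event.getPath());
        };

        // exists() registers the watch even when the znode is not there yet,
        // the same "Set watcher on znode that does not yet exist" idea in the log.
        Stat stat = zk.exists("/hbase/master", masterWatcher);
        System.out.println("/hbase/master currently " + (stat == null ? "absent" : "present"));
        zk.close();
      }
    }
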
2024-11-22T13:35:40,185 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e025332d312f,35487,1732282539938 2024-11-22T13:35:40,193 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/hbase.id] with ID: 10ab0f3b-1115-4f6c-a1e6-64b56eaa2c7a 2024-11-22T13:35:40,193 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/.tmp/hbase.id 2024-11-22T13:35:40,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:35:40,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:35:40,200 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/.tmp/hbase.id]:[hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/hbase.id] 2024-11-22T13:35:40,213 INFO [master/e025332d312f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:40,213 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T13:35:40,214 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
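
The FSUtils entries just above create the cluster ID by writing hbase.id to a .tmp location and then moving it to its target, the usual write-then-rename way of getting an all-or-nothing file on HDFS. A generic sketch of that pattern with the stock Hadoop FileSystem API; the paths below are shortened, illustrative stand-ins rather than the full paths from this run, and the log only shows the two messages, not the exact code behind them.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WriteThenRename {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // fs.defaultFS would point at the mini DFS, e.g. hdfs://localhost:<port>.
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");   // illustrative path
        Path target = new Path("/user/jenkins/test-data/hbase.id");     // illustrative path

        // Write the content to the temporary location first.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("10ab0f3b-1115-4f6c-a1e6-64b56eaa2c7a".getBytes(StandardCharsets.UTF_8));
        }
        // Then rename into place; readers either see no file or the complete one.
        if (!fs.rename(tmp, target)) {
          throw new IOException("rename failed: " + tmp + " -> " + target);
        }
      }
    }
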
2024-11-22T13:35:40,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:35:40,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:35:40,238 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:35:40,240 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T13:35:40,240 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:35:40,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:35:40,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:35:40,248 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store 2024-11-22T13:35:40,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:35:40,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:35:40,257 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:40,257 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:35:40,257 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:40,257 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:40,257 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:35:40,258 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:35:40,258 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
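
The MasterRegion entries above print a full table descriptor for 'master:store' (families info, proc, rs and state, each with VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE attributes). The public client API expresses the same kind of descriptor through TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; a sketch that reproduces the printed 'info' and 'proc' attributes for a hypothetical user table, since tests would not normally create the internal master:store region by hand:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreLikeDescriptor {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();

        // Mirrors the 'proc' family: 1 version, ROW bloom filter, 64 KB blocks.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_store"))   // hypothetical table name
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();

        System.out.println(td);
      }
    }
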
2024-11-22T13:35:40,258 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282540257Disabling compacts and flushes for region at 1732282540257Disabling writes for close at 1732282540257Writing region close event to WAL at 1732282540258 (+1 ms)Closed at 1732282540258 2024-11-22T13:35:40,259 WARN [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/.initializing 2024-11-22T13:35:40,259 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938 2024-11-22T13:35:40,262 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C35487%2C1732282539938, suffix=, logDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938, archiveDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/oldWALs, maxLogs=10 2024-11-22T13:35:40,263 INFO [master/e025332d312f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C35487%2C1732282539938.1732282540263 2024-11-22T13:35:40,269 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 2024-11-22T13:35:40,270 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33233:33233),(127.0.0.1/127.0.0.1:38501:38501)] 2024-11-22T13:35:40,270 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:35:40,271 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:40,271 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,271 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T13:35:40,274 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:40,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T13:35:40,277 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:35:40,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T13:35:40,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:35:40,280 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,281 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T13:35:40,281 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,282 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:35:40,282 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,283 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,284 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,285 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,285 DEBUG [master/e025332d312f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,286 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T13:35:40,287 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:35:40,290 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:35:40,291 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742259, jitterRate=-0.05616973340511322}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T13:35:40,292 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732282540271Initializing all the Stores at 1732282540272 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282540272Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282540272Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282540272Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282540272Cleaning up temporary data from old regions at 1732282540285 (+13 ms)Region opened successfully at 1732282540292 (+7 ms) 2024-11-22T13:35:40,292 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T13:35:40,296 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a85d048, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:35:40,297 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T13:35:40,297 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T13:35:40,298 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T13:35:40,298 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T13:35:40,298 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T13:35:40,299 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T13:35:40,299 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T13:35:40,301 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T13:35:40,302 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T13:35:40,311 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T13:35:40,311 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T13:35:40,312 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T13:35:40,321 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T13:35:40,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T13:35:40,323 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T13:35:40,332 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T13:35:40,333 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T13:35:40,342 DEBUG 
[master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T13:35:40,344 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T13:35:40,353 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T13:35:40,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:35:40,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:35:40,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,364 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e025332d312f,35487,1732282539938, sessionid=0x10162c1db350000, setting cluster-up flag (Was=false) 2024-11-22T13:35:40,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,416 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T13:35:40,418 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,35487,1732282539938 2024-11-22T13:35:40,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,437 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:40,469 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T13:35:40,470 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,35487,1732282539938 2024-11-22T13:35:40,472 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T13:35:40,473 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T13:35:40,474 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T13:35:40,474 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T13:35:40,474 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e025332d312f,35487,1732282539938 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T13:35:40,476 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:35:40,476 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:35:40,476 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:35:40,476 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:35:40,476 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e025332d312f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T13:35:40,476 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,476 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:35:40,476 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e025332d312f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T13:35:40,477 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732282570477 2024-11-22T13:35:40,477 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T13:35:40,477 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T13:35:40,477 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T13:35:40,477 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T13:35:40,477 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T13:35:40,477 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T13:35:40,478 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,478 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T13:35:40,478 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T13:35:40,478 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:35:40,478 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T13:35:40,478 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T13:35:40,479 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T13:35:40,479 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T13:35:40,479 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282540479,5,FailOnTimeoutGroup] 2024-11-22T13:35:40,479 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282540479,5,FailOnTimeoutGroup] 2024-11-22T13:35:40,479 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,479 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T13:35:40,479 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,479 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,480 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,480 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T13:35:40,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:35:40,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:35:40,490 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T13:35:40,490 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7 2024-11-22T13:35:40,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:35:40,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:35:40,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:40,505 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:35:40,507 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:35:40,507 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:40,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:35:40,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:35:40,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:40,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:35:40,513 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:35:40,513 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:40,514 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:35:40,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:35:40,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:40,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:40,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:35:40,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740 2024-11-22T13:35:40,518 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740 2024-11-22T13:35:40,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:35:40,520 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:35:40,521 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T13:35:40,547 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:35:40,548 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(746): ClusterId : 10ab0f3b-1115-4f6c-a1e6-64b56eaa2c7a 2024-11-22T13:35:40,549 DEBUG [RS:0;e025332d312f:41959 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T13:35:40,550 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:35:40,551 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=848732, jitterRate=0.07921916246414185}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:35:40,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732282540503Initializing all the Stores at 1732282540504 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282540504Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282540505 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282540505Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282540505Cleaning up temporary data from old regions at 1732282540520 (+15 ms)Region opened successfully at 1732282540552 (+32 ms) 2024-11-22T13:35:40,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:35:40,552 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:35:40,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:35:40,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:35:40,552 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:35:40,553 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-22T13:35:40,553 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282540552Disabling compacts and flushes for region at 1732282540552Disabling writes for close at 1732282540552Writing region close event to WAL at 1732282540553 (+1 ms)Closed at 1732282540553 2024-11-22T13:35:40,555 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:35:40,555 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T13:35:40,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T13:35:40,557 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:35:40,558 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T13:35:40,559 DEBUG [RS:0;e025332d312f:41959 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T13:35:40,559 DEBUG [RS:0;e025332d312f:41959 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T13:35:40,569 DEBUG [RS:0;e025332d312f:41959 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T13:35:40,570 DEBUG [RS:0;e025332d312f:41959 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32cb990, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:35:40,588 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e025332d312f:41959 2024-11-22T13:35:40,588 INFO [RS:0;e025332d312f:41959 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T13:35:40,588 INFO [RS:0;e025332d312f:41959 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T13:35:40,588 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T13:35:40,589 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(2659): reportForDuty to master=e025332d312f,35487,1732282539938 with port=41959, startcode=1732282540115 2024-11-22T13:35:40,589 DEBUG [RS:0;e025332d312f:41959 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T13:35:40,591 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54705, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T13:35:40,592 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35487 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e025332d312f,41959,1732282540115 2024-11-22T13:35:40,592 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35487 {}] master.ServerManager(517): Registering regionserver=e025332d312f,41959,1732282540115 2024-11-22T13:35:40,594 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7 2024-11-22T13:35:40,594 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46035 2024-11-22T13:35:40,594 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T13:35:40,606 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:35:40,606 DEBUG [RS:0;e025332d312f:41959 {}] zookeeper.ZKUtil(111): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e025332d312f,41959,1732282540115 2024-11-22T13:35:40,607 WARN [RS:0;e025332d312f:41959 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T13:35:40,607 INFO [RS:0;e025332d312f:41959 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:35:40,607 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e025332d312f,41959,1732282540115] 2024-11-22T13:35:40,607 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115 2024-11-22T13:35:40,611 INFO [RS:0;e025332d312f:41959 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T13:35:40,614 INFO [RS:0;e025332d312f:41959 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T13:35:40,615 INFO [RS:0;e025332d312f:41959 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T13:35:40,615 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T13:35:40,615 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T13:35:40,616 INFO [RS:0;e025332d312f:41959 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T13:35:40,617 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,617 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,618 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:40,618 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:35:40,618 DEBUG [RS:0;e025332d312f:41959 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:35:40,618 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T13:35:40,618 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,618 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,618 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,618 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,618 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41959,1732282540115-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:35:40,633 INFO [RS:0;e025332d312f:41959 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T13:35:40,633 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41959,1732282540115-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,634 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,634 INFO [RS:0;e025332d312f:41959 {}] regionserver.Replication(171): e025332d312f,41959,1732282540115 started 2024-11-22T13:35:40,648 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:40,648 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1482): Serving as e025332d312f,41959,1732282540115, RpcServer on e025332d312f/172.17.0.2:41959, sessionid=0x10162c1db350001 2024-11-22T13:35:40,648 DEBUG [RS:0;e025332d312f:41959 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T13:35:40,648 DEBUG [RS:0;e025332d312f:41959 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e025332d312f,41959,1732282540115 2024-11-22T13:35:40,649 DEBUG [RS:0;e025332d312f:41959 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,41959,1732282540115' 2024-11-22T13:35:40,649 DEBUG [RS:0;e025332d312f:41959 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T13:35:40,649 DEBUG [RS:0;e025332d312f:41959 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T13:35:40,650 DEBUG [RS:0;e025332d312f:41959 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T13:35:40,650 DEBUG [RS:0;e025332d312f:41959 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T13:35:40,650 DEBUG [RS:0;e025332d312f:41959 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e025332d312f,41959,1732282540115 2024-11-22T13:35:40,650 DEBUG [RS:0;e025332d312f:41959 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,41959,1732282540115' 2024-11-22T13:35:40,650 DEBUG [RS:0;e025332d312f:41959 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T13:35:40,650 DEBUG 
[RS:0;e025332d312f:41959 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T13:35:40,651 DEBUG [RS:0;e025332d312f:41959 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T13:35:40,651 INFO [RS:0;e025332d312f:41959 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T13:35:40,651 INFO [RS:0;e025332d312f:41959 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T13:35:40,709 WARN [e025332d312f:35487 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T13:35:40,753 INFO [RS:0;e025332d312f:41959 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C41959%2C1732282540115, suffix=, logDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115, archiveDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs, maxLogs=32 2024-11-22T13:35:40,754 INFO [RS:0;e025332d312f:41959 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41959%2C1732282540115.1732282540754 2024-11-22T13:35:40,761 INFO [RS:0;e025332d312f:41959 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 2024-11-22T13:35:40,762 DEBUG [RS:0;e025332d312f:41959 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33233:33233),(127.0.0.1/127.0.0.1:38501:38501)] 2024-11-22T13:35:40,959 DEBUG [e025332d312f:35487 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T13:35:40,960 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e025332d312f,41959,1732282540115 2024-11-22T13:35:40,963 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,41959,1732282540115, state=OPENING 2024-11-22T13:35:41,032 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T13:35:41,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:41,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:35:41,044 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:35:41,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,41959,1732282540115}] 2024-11-22T13:35:41,045 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-22T13:35:41,045 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:35:41,202 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T13:35:41,207 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47723, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T13:35:41,213 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T13:35:41,213 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:35:41,216 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C41959%2C1732282540115.meta, suffix=.meta, logDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115, archiveDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs, maxLogs=32 2024-11-22T13:35:41,217 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta 2024-11-22T13:35:41,226 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta 2024-11-22T13:35:41,227 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33233:33233),(127.0.0.1/127.0.0.1:38501:38501)] 2024-11-22T13:35:41,229 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:35:41,229 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T13:35:41,229 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T13:35:41,229 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T13:35:41,230 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T13:35:41,230 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:41,230 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T13:35:41,230 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T13:35:41,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:35:41,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:35:41,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:41,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:41,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:35:41,236 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:35:41,236 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:41,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:41,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:35:41,239 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:35:41,239 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:41,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:35:41,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:35:41,240 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:35:41,241 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:41,241 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T13:35:41,241 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:35:41,242 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740 2024-11-22T13:35:41,243 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740 2024-11-22T13:35:41,244 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:35:41,244 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:35:41,245 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:35:41,246 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:35:41,247 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770147, jitterRate=-0.020708054304122925}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:35:41,247 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T13:35:41,248 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732282541230Writing region info on filesystem at 1732282541230Initializing all the Stores at 1732282541232 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282541232Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282541232Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282541232Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282541232Cleaning up temporary data from old regions at 1732282541244 (+12 ms)Running coprocessor post-open hooks at 1732282541248 (+4 ms)Region opened successfully at 1732282541248 2024-11-22T13:35:41,249 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732282541201 2024-11-22T13:35:41,252 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T13:35:41,252 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T13:35:41,253 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,41959,1732282540115 2024-11-22T13:35:41,255 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,41959,1732282540115, state=OPEN 2024-11-22T13:35:41,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:35:41,293 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:35:41,293 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e025332d312f,41959,1732282540115 2024-11-22T13:35:41,293 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:35:41,293 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:35:41,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T13:35:41,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,41959,1732282540115 in 249 msec 2024-11-22T13:35:41,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T13:35:41,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 742 msec 2024-11-22T13:35:41,302 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:35:41,302 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T13:35:41,304 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:35:41,304 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,41959,1732282540115, seqNum=-1] 2024-11-22T13:35:41,305 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:35:41,306 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54777, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:35:41,314 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 839 msec 2024-11-22T13:35:41,314 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732282541314, completionTime=-1 2024-11-22T13:35:41,314 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T13:35:41,314 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732282601317 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732282661317 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35487,1732282539938-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35487,1732282539938-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35487,1732282539938-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e025332d312f:35487, period=300000, unit=MILLISECONDS is enabled. 
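InitMetaProcedure above creates the built-in 'default' and 'hbase' namespaces before the master finishes initialization. For reference, a minimal client-side sketch of listing and creating namespaces with the public Admin API; the connection setup and the extra namespace name 'demo_ns' are assumptions for illustration and are not taken from this log or the test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The 'default' and 'hbase' namespaces created by InitMetaProcedure show up here.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
      // Creating an additional namespace (hypothetical name).
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
    }
  }
}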
2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,319 DEBUG [master/e025332d312f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T13:35:41,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.136sec 2024-11-22T13:35:41,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T13:35:41,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T13:35:41,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T13:35:41,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T13:35:41,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T13:35:41,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35487,1732282539938-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:35:41,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35487,1732282539938-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T13:35:41,325 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T13:35:41,325 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T13:35:41,325 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35487,1732282539938-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
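Several optional master features are reported as disabled above (quota support, slow/large request logging to hbase:slowlog, the WAL event tracker and replication sink tracker tables). A hedged sketch of the switches assumed to control the first two; the key names are given from memory, not from this log, and should be verified against the HBase reference guide for the release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class OptionalFeatureSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed switches (verify key names for this HBase version):
    conf.setBoolean("hbase.quota.enabled", true);                         // MasterQuotaManager
    conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true); // hbase:slowlog sink
  }
}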
2024-11-22T13:35:41,349 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543a1dd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:35:41,349 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e025332d312f,35487,-1 for getting cluster id 2024-11-22T13:35:41,349 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T13:35:41,351 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '10ab0f3b-1115-4f6c-a1e6-64b56eaa2c7a' 2024-11-22T13:35:41,352 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T13:35:41,352 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "10ab0f3b-1115-4f6c-a1e6-64b56eaa2c7a" 2024-11-22T13:35:41,352 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33a28f55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:35:41,353 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e025332d312f,35487,-1] 2024-11-22T13:35:41,353 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T13:35:41,353 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:35:41,355 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T13:35:41,357 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60b19f04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:35:41,357 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:35:41,359 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,41959,1732282540115, seqNum=-1] 2024-11-22T13:35:41,359 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:35:41,362 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58236, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:35:41,364 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e025332d312f,35487,1732282539938 2024-11-22T13:35:41,365 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:41,368 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T13:35:41,385 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:35:41,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:41,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:41,385 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:35:41,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:35:41,385 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:35:41,385 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T13:35:41,385 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:35:41,386 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40403 2024-11-22T13:35:41,387 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40403 connecting to ZooKeeper ensemble=127.0.0.1:62897 2024-11-22T13:35:41,388 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:41,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:35:41,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:404030x0, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:35:41,411 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-22T13:35:41,411 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40403-0x10162c1db350002 connected 2024-11-22T13:35:41,411 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-22T13:35:41,412 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T13:35:41,412 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 
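The entries above show a client going through the connection registry (cluster id lookup, then the meta region location) once the minicluster is up, followed by a second region server being wired up. For orientation only, a minimal sketch of the equivalent public-API client connection; it is not the test's own code, just an illustration of the path ClusterIdFetcher and ConnectionUtils log above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ConnectionFactory follows the same registry path logged above:
    // fetch cluster id, then the hbase:meta region location.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}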
2024-11-22T13:35:41,413 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T13:35:41,415 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:35:41,418 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40403 2024-11-22T13:35:41,420 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40403 2024-11-22T13:35:41,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40403 2024-11-22T13:35:41,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40403 2024-11-22T13:35:41,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40403 2024-11-22T13:35:41,423 INFO [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(746): ClusterId : 10ab0f3b-1115-4f6c-a1e6-64b56eaa2c7a 2024-11-22T13:35:41,423 DEBUG [RS:1;e025332d312f:40403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T13:35:41,432 DEBUG [RS:1;e025332d312f:40403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T13:35:41,432 DEBUG [RS:1;e025332d312f:40403 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T13:35:41,443 DEBUG [RS:1;e025332d312f:40403 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T13:35:41,443 DEBUG [RS:1;e025332d312f:40403 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59d11b7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:35:41,457 DEBUG [RS:1;e025332d312f:40403 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;e025332d312f:40403 2024-11-22T13:35:41,457 INFO [RS:1;e025332d312f:40403 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T13:35:41,457 INFO [RS:1;e025332d312f:40403 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T13:35:41,457 DEBUG [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T13:35:41,457 INFO [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(2659): reportForDuty to master=e025332d312f,35487,1732282539938 with port=40403, startcode=1732282541385 2024-11-22T13:35:41,457 DEBUG [RS:1;e025332d312f:40403 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T13:35:41,459 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48483, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T13:35:41,459 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35487 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e025332d312f,40403,1732282541385 2024-11-22T13:35:41,459 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35487 {}] master.ServerManager(517): Registering regionserver=e025332d312f,40403,1732282541385 2024-11-22T13:35:41,461 DEBUG [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7 2024-11-22T13:35:41,461 DEBUG [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46035 2024-11-22T13:35:41,461 DEBUG [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T13:35:41,473 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:35:41,474 DEBUG [RS:1;e025332d312f:40403 {}] zookeeper.ZKUtil(111): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e025332d312f,40403,1732282541385 2024-11-22T13:35:41,474 WARN [RS:1;e025332d312f:40403 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T13:35:41,474 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e025332d312f,40403,1732282541385] 2024-11-22T13:35:41,474 INFO [RS:1;e025332d312f:40403 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:35:41,474 DEBUG [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385 2024-11-22T13:35:41,479 INFO [RS:1;e025332d312f:40403 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T13:35:41,482 INFO [RS:1;e025332d312f:40403 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T13:35:41,482 INFO [RS:1;e025332d312f:40403 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T13:35:41,482 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
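The new region server above reports for duty, instantiates an FSHLogProvider-backed WAL, sizes its global memstore at 880 M (low-water mark 836 M), and applies the pressure-aware compaction throughput bounds of 100 MB/s and 50 MB/s. A hedged configuration sketch of the keys assumed to drive those choices; the values simply echo what the log printed, and the key names should be checked against the reference guide.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");   // FSHLogProvider, as logged by WALFactory
    // Global memstore sizing as a fraction of heap (assumed source of the 880 M / 836 M figures).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // PressureAwareCompactionThroughputController bounds (100 MB/s and 50 MB/s above).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
  }
}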
2024-11-22T13:35:41,483 INFO [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T13:35:41,484 INFO [RS:1;e025332d312f:40403 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T13:35:41,484 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,484 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,484 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,485 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:35:41,486 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:35:41,486 DEBUG [RS:1;e025332d312f:40403 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:35:41,486 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T13:35:41,486 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,486 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,486 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,486 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,486 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,40403,1732282541385-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:35:41,504 INFO [RS:1;e025332d312f:40403 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T13:35:41,504 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,40403,1732282541385-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,504 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,504 INFO [RS:1;e025332d312f:40403 {}] regionserver.Replication(171): e025332d312f,40403,1732282541385 started 2024-11-22T13:35:41,517 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:35:41,517 INFO [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(1482): Serving as e025332d312f,40403,1732282541385, RpcServer on e025332d312f/172.17.0.2:40403, sessionid=0x10162c1db350002 2024-11-22T13:35:41,517 DEBUG [RS:1;e025332d312f:40403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T13:35:41,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;e025332d312f:40403,5,FailOnTimeoutGroup] 2024-11-22T13:35:41,517 DEBUG [RS:1;e025332d312f:40403 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e025332d312f,40403,1732282541385 2024-11-22T13:35:41,517 DEBUG [RS:1;e025332d312f:40403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,40403,1732282541385' 2024-11-22T13:35:41,517 DEBUG [RS:1;e025332d312f:40403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T13:35:41,517 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-22T13:35:41,518 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T13:35:41,518 DEBUG [RS:1;e025332d312f:40403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T13:35:41,518 DEBUG [RS:1;e025332d312f:40403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T13:35:41,518 DEBUG [RS:1;e025332d312f:40403 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T13:35:41,518 DEBUG [RS:1;e025332d312f:40403 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
e025332d312f,40403,1732282541385 2024-11-22T13:35:41,518 DEBUG [RS:1;e025332d312f:40403 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,40403,1732282541385' 2024-11-22T13:35:41,518 DEBUG [RS:1;e025332d312f:40403 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T13:35:41,519 DEBUG [RS:1;e025332d312f:40403 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T13:35:41,519 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is e025332d312f,35487,1732282539938 2024-11-22T13:35:41,519 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7fa0b9f8 2024-11-22T13:35:41,519 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T13:35:41,519 DEBUG [RS:1;e025332d312f:40403 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T13:35:41,519 INFO [RS:1;e025332d312f:40403 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T13:35:41,519 INFO [RS:1;e025332d312f:40403 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T13:35:41,521 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54210, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T13:35:41,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35487 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T13:35:41,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35487 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
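The two TableDescriptorChecker warnings above fire because the test runs with a very small region max file size (786432 bytes) and memstore flush size (8192 bytes), set either on the table descriptor or via "hbase.hregion.max.filesize" / "hbase.hregion.memstore.flush.size", to provoke frequent flushes and rolls. A minimal sketch of how such a descriptor could be built with the public client API; this is illustrative only and not the test's actual code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallTableDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)   // matches VERSIONS => '1' in the create request below
            .build())
        .setMaxFileSize(786432L)     // would trigger the MAX_FILESIZE warning
        .setMemStoreFlushSize(8192L) // would trigger the MEMSTORE_FLUSHSIZE warning
        .build();
    // Alternatively, the same limits can come from the cluster Configuration:
    // conf.setLong("hbase.hregion.max.filesize", 786432L);
    // conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
  }
}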
2024-11-22T13:35:41,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35487 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:35:41,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35487 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T13:35:41,526 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T13:35:41,526 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:41,526 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35487 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-22T13:35:41,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T13:35:41,527 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T13:35:41,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741835_1011 (size=393) 2024-11-22T13:35:41,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741835_1011 (size=393) 2024-11-22T13:35:41,537 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2a7538bdac35a5c8f19a45c3de9ff4c0, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7 2024-11-22T13:35:41,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43315 is added to blk_1073741836_1012 (size=76) 2024-11-22T13:35:41,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45457 is added to blk_1073741836_1012 (size=76) 2024-11-22T13:35:41,547 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:41,547 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 2a7538bdac35a5c8f19a45c3de9ff4c0, disabling compactions & flushes 2024-11-22T13:35:41,547 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:35:41,547 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:35:41,547 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. after waiting 0 ms 2024-11-22T13:35:41,547 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:35:41,547 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:35:41,548 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2a7538bdac35a5c8f19a45c3de9ff4c0: Waiting for close lock at 1732282541547Disabling compacts and flushes for region at 1732282541547Disabling writes for close at 1732282541547Writing region close event to WAL at 1732282541547Closed at 1732282541547 2024-11-22T13:35:41,549 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T13:35:41,550 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732282541549"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732282541549"}]},"ts":"1732282541549"} 2024-11-22T13:35:41,552 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
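At this point CreateTableProcedure has written the new region's row to hbase:meta (the Put shown above); the region is assigned and opened in the entries that follow. For reference, a small sketch of how a client would read that assignment back through the public RegionLocator API; the connection setup is elided and assumed.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  static void printLocations(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Prints the encoded region name and its hosting server once the
        // assignment recorded in hbase:meta above has completed.
        System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
      }
    }
  }
}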
2024-11-22T13:35:41,553 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T13:35:41,553 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282541553"}]},"ts":"1732282541553"} 2024-11-22T13:35:41,555 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-22T13:35:41,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2a7538bdac35a5c8f19a45c3de9ff4c0, ASSIGN}] 2024-11-22T13:35:41,557 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2a7538bdac35a5c8f19a45c3de9ff4c0, ASSIGN 2024-11-22T13:35:41,558 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2a7538bdac35a5c8f19a45c3de9ff4c0, ASSIGN; state=OFFLINE, location=e025332d312f,41959,1732282540115; forceNewPlan=false, retain=false 2024-11-22T13:35:41,623 INFO [RS:1;e025332d312f:40403 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C40403%2C1732282541385, suffix=, logDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385, archiveDir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs, maxLogs=32 2024-11-22T13:35:41,624 INFO [RS:1;e025332d312f:40403 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C40403%2C1732282541385.1732282541623 2024-11-22T13:35:41,632 INFO [RS:1;e025332d312f:40403 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 2024-11-22T13:35:41,633 DEBUG [RS:1;e025332d312f:40403 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33233:33233),(127.0.0.1/127.0.0.1:38501:38501)] 2024-11-22T13:35:41,709 INFO [e025332d312f:35487 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
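The AbstractFSWAL line above creates the second region server's WAL with blocksize=256 MB, rollsize=128 MB (a 0.5 multiplier) and maxLogs=32; rolling on these thresholds is exactly what the surrounding TestLogRolling test exercises. A hedged sketch of the keys assumed to control this, with values echoing the log; the key names are from memory and should be confirmed for this release.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys behind the logged WAL configuration:
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = blocksize * multiplier
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // maxLogs above
    conf.setLong("hbase.regionserver.logroll.period", 3_600_000L);         // time-based roll
  }
}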
2024-11-22T13:35:41,710 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2a7538bdac35a5c8f19a45c3de9ff4c0, regionState=OPENING, regionLocation=e025332d312f,41959,1732282540115 2024-11-22T13:35:41,715 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2a7538bdac35a5c8f19a45c3de9ff4c0, ASSIGN because future has completed 2024-11-22T13:35:41,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2a7538bdac35a5c8f19a45c3de9ff4c0, server=e025332d312f,41959,1732282540115}] 2024-11-22T13:35:41,876 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:35:41,876 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 2a7538bdac35a5c8f19a45c3de9ff4c0, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:35:41,877 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,877 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:35:41,878 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,878 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,880 INFO [StoreOpener-2a7538bdac35a5c8f19a45c3de9ff4c0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,883 INFO [StoreOpener-2a7538bdac35a5c8f19a45c3de9ff4c0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2a7538bdac35a5c8f19a45c3de9ff4c0 columnFamilyName info 2024-11-22T13:35:41,883 DEBUG [StoreOpener-2a7538bdac35a5c8f19a45c3de9ff4c0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:35:41,884 INFO [StoreOpener-2a7538bdac35a5c8f19a45c3de9ff4c0-1 {}] regionserver.HStore(327): Store=2a7538bdac35a5c8f19a45c3de9ff4c0/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:35:41,884 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,885 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,886 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,886 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,886 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,889 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,892 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:35:41,893 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 2a7538bdac35a5c8f19a45c3de9ff4c0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801136, jitterRate=0.018698230385780334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T13:35:41,893 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:35:41,894 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 2a7538bdac35a5c8f19a45c3de9ff4c0: Running coprocessor pre-open hook at 1732282541878Writing region info on filesystem at 1732282541878Initializing all the Stores at 1732282541879 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282541879Cleaning up temporary data from old regions at 1732282541886 (+7 ms)Running coprocessor post-open hooks at 1732282541893 (+7 ms)Region opened successfully at 1732282541894 (+1 ms) 2024-11-22T13:35:41,895 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0., pid=6, masterSystemTime=1732282541871 2024-11-22T13:35:41,898 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:35:41,898 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:35:41,899 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=2a7538bdac35a5c8f19a45c3de9ff4c0, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,41959,1732282540115 2024-11-22T13:35:41,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2a7538bdac35a5c8f19a45c3de9ff4c0, server=e025332d312f,41959,1732282540115 because future has completed 2024-11-22T13:35:41,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T13:35:41,908 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 2a7538bdac35a5c8f19a45c3de9ff4c0, server=e025332d312f,41959,1732282540115 in 188 msec 2024-11-22T13:35:41,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T13:35:41,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=2a7538bdac35a5c8f19a45c3de9ff4c0, ASSIGN in 352 msec 2024-11-22T13:35:41,914 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T13:35:41,914 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282541914"}]},"ts":"1732282541914"} 2024-11-22T13:35:41,917 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-22T13:35:41,919 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T13:35:41,921 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 397 msec 2024-11-22T13:35:46,734 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T13:35:46,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:46,763 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:46,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:46,766 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:35:46,774 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-22T13:35:46,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T13:35:46,898 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-22T13:35:51,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35487 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T13:35:51,549 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-22T13:35:51,549 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-22T13:35:51,556 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T13:35:51,556 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:35:51,571 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:51,575 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:51,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:51,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:51,575 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:35:51,576 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a28e14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:51,576 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a9d9bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:51,669 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@492554e1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/java.io.tmpdir/jetty-localhost-36853-hadoop-hdfs-3_4_1-tests_jar-_-any-14149796082301467835/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:51,669 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3f85c2b2{HTTP/1.1, (http/1.1)}{localhost:36853} 2024-11-22T13:35:51,669 INFO [Time-limited test {}] server.Server(415): Started @120849ms 2024-11-22T13:35:51,671 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:35:51,699 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:51,702 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:51,703 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:51,703 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:51,703 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:35:51,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f01ff52{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:51,704 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@574823ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:51,797 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@73024f00{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/java.io.tmpdir/jetty-localhost-43009-hadoop-hdfs-3_4_1-tests_jar-_-any-579390507406504775/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:51,797 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b21f544{HTTP/1.1, (http/1.1)}{localhost:43009} 2024-11-22T13:35:51,797 INFO [Time-limited test {}] server.Server(415): Started @120977ms 2024-11-22T13:35:51,798 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:35:51,829 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:35:51,832 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:35:51,833 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:35:51,833 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:35:51,833 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:35:51,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@184e2f57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:35:51,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5af75f98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:35:51,928 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6f8ca33c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/java.io.tmpdir/jetty-localhost-45747-hadoop-hdfs-3_4_1-tests_jar-_-any-12318141936992833686/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:51,928 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21cc8d76{HTTP/1.1, (http/1.1)}{localhost:45747} 2024-11-22T13:35:51,928 INFO [Time-limited test {}] server.Server(415): Started @121108ms 2024-11-22T13:35:51,929 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:35:53,112 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:53,112 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:53,132 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:35:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xafe792dfba741d4e with lease ID 0xfb4f9c7da85b9fcc: Processing first storage report for DS-3af42623-0692-4451-a113-46de71282c50 from datanode DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafe792dfba741d4e with lease ID 0xfb4f9c7da85b9fcc: from storage DS-3af42623-0692-4451-a113-46de71282c50 node DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T13:35:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xafe792dfba741d4e with lease ID 0xfb4f9c7da85b9fcc: Processing first storage report for DS-ee9d2e9d-4835-4567-99d6-8338423c563a from datanode DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:53,135 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xafe792dfba741d4e with lease ID 0xfb4f9c7da85b9fcc: from storage DS-ee9d2e9d-4835-4567-99d6-8338423c563a node DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:53,357 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data7/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:53,357 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data8/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:53,373 WARN [Thread-831 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:35:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f01f0409093ff99 with lease ID 0xfb4f9c7da85b9fcd: Processing first storage report for DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927 from datanode DatanodeRegistration(127.0.0.1:41767, datanodeUuid=9eb5c675-dc33-4372-83fc-9814ca496754, infoPort=45919, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f01f0409093ff99 with lease ID 0xfb4f9c7da85b9fcd: from storage DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927 node DatanodeRegistration(127.0.0.1:41767, datanodeUuid=9eb5c675-dc33-4372-83fc-9814ca496754, infoPort=45919, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T13:35:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3f01f0409093ff99 with lease ID 0xfb4f9c7da85b9fcd: Processing first storage report for DS-992072c4-de9d-4c69-800b-90074f33dcdc from datanode DatanodeRegistration(127.0.0.1:41767, datanodeUuid=9eb5c675-dc33-4372-83fc-9814ca496754, infoPort=45919, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:53,375 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3f01f0409093ff99 with lease ID 0xfb4f9c7da85b9fcd: from storage DS-992072c4-de9d-4c69-800b-90074f33dcdc node DatanodeRegistration(127.0.0.1:41767, datanodeUuid=9eb5c675-dc33-4372-83fc-9814ca496754, infoPort=45919, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:53,418 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data9/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:53,418 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data10/current/BP-64778002-172.17.0.2-1732282537642/current, will proceed with Du for space computation calculation, 2024-11-22T13:35:53,436 WARN [Thread-853 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:35:53,439 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ada19d5b12bfce0 with lease ID 0xfb4f9c7da85b9fce: Processing first storage report for DS-72b2ced2-faef-46e8-89ef-bae5da94d093 from datanode DatanodeRegistration(127.0.0.1:37621, datanodeUuid=2515c6c8-8eeb-4875-b761-6aa30cfd3cfd, infoPort=44259, infoSecurePort=0, ipcPort=37457, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:53,439 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ada19d5b12bfce0 with lease ID 0xfb4f9c7da85b9fce: from storage DS-72b2ced2-faef-46e8-89ef-bae5da94d093 node DatanodeRegistration(127.0.0.1:37621, datanodeUuid=2515c6c8-8eeb-4875-b761-6aa30cfd3cfd, infoPort=44259, infoSecurePort=0, ipcPort=37457, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:53,439 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7ada19d5b12bfce0 with lease ID 0xfb4f9c7da85b9fce: Processing first storage report for DS-124010ab-924c-4c5f-9496-56ea9c23d989 from datanode DatanodeRegistration(127.0.0.1:37621, datanodeUuid=2515c6c8-8eeb-4875-b761-6aa30cfd3cfd, infoPort=44259, infoSecurePort=0, ipcPort=37457, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642) 2024-11-22T13:35:53,439 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7ada19d5b12bfce0 with lease ID 0xfb4f9c7da85b9fce: from storage DS-124010ab-924c-4c5f-9496-56ea9c23d989 node DatanodeRegistration(127.0.0.1:37621, datanodeUuid=2515c6c8-8eeb-4875-b761-6aa30cfd3cfd, infoPort=44259, infoSecurePort=0, ipcPort=37457, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:35:53,464 WARN [ResponseProcessor for block BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:35:53,464 WARN [ResponseProcessor for block BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:35:53,464 WARN [ResponseProcessor for block BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:35:53,464 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta block BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:35:53,464 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 block BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:35:53,464 WARN [ResponseProcessor for block BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:35:53,465 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 block BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:35:53,465 WARN [PacketResponder: BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43315] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] 
at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,465 WARN [PacketResponder: BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43315] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,465 WARN [PacketResponder: BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43315] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] 
at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,465 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 block BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:35:53,465 WARN [PacketResponder: BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43315] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1731184442_22 at /127.0.0.1:35448 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35448 dst: /127.0.0.1:45457 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:44996 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44996 dst: /127.0.0.1:43315 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,466 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:45004 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45004 dst: /127.0.0.1:43315 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1731184442_22 at /127.0.0.1:44982 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44982 dst: /127.0.0.1:43315 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,467 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@544fa662{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:53,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35488 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35488 dst: /127.0.0.1:45457 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35474 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35474 dst: /127.0.0.1:45457 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-565990181_22 at /127.0.0.1:35512 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35512 dst: /127.0.0.1:45457 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,467 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-565990181_22 at /127.0.0.1:45038 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43315:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45038 dst: /127.0.0.1:43315 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:35:53,468 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3574ce3f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:35:53,468 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:35:53,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10a4d310{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:35:53,468 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d5daa57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,STOPPED} 2024-11-22T13:35:53,470 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:35:53,470 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T13:35:53,470 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:35:53,470 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-64778002-172.17.0.2-1732282537642 (Datanode Uuid f18fa2ec-361a-4869-ab7d-fbdeafde956b) service to localhost/127.0.0.1:46035 2024-11-22T13:35:53,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data3/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:53,471 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data4/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:35:53,471 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:35:53,472 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@5453938b {}] datanode.DataXceiver(331): 127.0.0.1:45457:DataXceiver error processing unknown operation src: /127.0.0.1:60642 dst: /127.0.0.1:45457 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,472 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@4ce51a0c {}] datanode.DataXceiver(331): 127.0.0.1:45457:DataXceiver error processing unknown operation src: /127.0.0.1:60644 dst: /127.0.0.1:45457 java.io.IOException: Server closed. 
at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:53,472 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 block BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:35:53,472 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 block BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:35:53,472 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta block BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:35:53,472 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:60648 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45457:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60648 dst: /127.0.0.1:45457 java.io.IOException: The stream is closed at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:117) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:914) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:35:53,473 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 block BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:35:53,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3e10767c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:35:53,476 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d2d9832{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:35:53,476 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:35:53,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48d478e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:35:53,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@651aa118{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,STOPPED} 2024-11-22T13:35:53,477 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:35:53,477 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:35:53,477 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-64778002-172.17.0.2-1732282537642 (Datanode Uuid 2fab4074-db1e-4f7c-946e-7ff502acb41d) service to localhost/127.0.0.1:46035
2024-11-22T13:35:53,477 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T13:35:53,478 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data1/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T13:35:53,478 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data2/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T13:35:53,478 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T13:35:53,481 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0., hostname=e025332d312f,41959,1732282540115, seqNum=2]
2024-11-22T13:35:53,483 ERROR [FSHLog-0-hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7-prefix:e025332d312f,41959,1732282540115 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:53,483 WARN [FSHLog-0-hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7-prefix:e025332d312f,41959,1732282540115 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:53,483 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C41959%2C1732282540115:(num 1732282540754) roll requested
2024-11-22T13:35:53,483 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41959%2C1732282540115.1732282553483
2024-11-22T13:35:53,486 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:53,486 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad.
2024-11-22T13:35:53,486 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741838_1018
2024-11-22T13:35:53,487 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:53,488 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]
2024-11-22T13:35:53,494 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:53,495 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:53,495 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:53,495 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:53,495 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:53,495 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282553483
2024-11-22T13:35:53,495 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:53,496 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:53,496 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44259:44259),(127.0.0.1/127.0.0.1:45919:45919)]
2024-11-22T13:35:53,496 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 is not closed yet, will try archiving it next time
2024-11-22T13:35:53,497 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-11-22T13:35:53,497 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-11-22T13:35:53,497 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754
2024-11-22T13:35:53,500 WARN [IPC Server handler 0 on default port 46035 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009
2024-11-22T13:35:53,503 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 after 4ms
2024-11-22T13:35:54,242 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:55,487 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
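The RecoverLeaseFSUtils entries above (attempt=0 after 4ms here, attempt=1 after 4008ms later in the log) follow the usual pattern for taking over a WAL whose writer died: ask the NameNode to recover the lease, then poll until the file is reported closed. A minimal sketch of that retry loop against the public DistributedFileSystem.recoverLease() API (the timeout and pause values are illustrative assumptions, not the ones HBase's utility uses):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch only: poll recoverLease() until the NameNode reports the file closed.
    public final class LeaseRecoverySketch {
        public static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path wal,
                long timeoutMs, long pauseMs) throws IOException, InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMs;
            while (System.currentTimeMillis() < deadline) {
                // recoverLease() returns true once the lease is released and the
                // file is closed, i.e. the old WAL is safe to read for splitting.
                if (dfs.recoverLease(wal)) {
                    return true;
                }
                Thread.sleep(pauseMs);
            }
            return false;
        }
    }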
2024-11-22T13:35:55,496 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:55,498 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282553483
2024-11-22T13:35:55,499 WARN [ResponseProcessor for block BP-64778002-172.17.0.2-1732282537642:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-64778002-172.17.0.2-1732282537642:blk_1073741839_1019
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:55,499 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282553483 block BP-64778002-172.17.0.2-1732282537642:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad.
2024-11-22T13:35:55,500 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:47972 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:37621:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47972 dst: /127.0.0.1:37621
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:55,500 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:50414 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:41767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50414 dst: /127.0.0.1:41767
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:55,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6f8ca33c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T13:35:55,543 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21cc8d76{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T13:35:55,543 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T13:35:55,543 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5af75f98{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T13:35:55,543 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@184e2f57{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,STOPPED}
2024-11-22T13:35:55,544 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-22T13:35:55,544 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-22T13:35:55,544 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-64778002-172.17.0.2-1732282537642 (Datanode Uuid 2515c6c8-8eeb-4875-b761-6aa30cfd3cfd) service to localhost/127.0.0.1:46035
2024-11-22T13:35:55,544 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T13:35:55,545 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data9/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T13:35:55,545 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data10/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T13:35:55,545 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T13:35:56,242 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:57,488 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:57,497 WARN [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]
2024-11-22T13:35:57,497 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:57,498 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C41959%2C1732282540115:(num 1732282553483) roll requested
2024-11-22T13:35:57,498 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41959%2C1732282540115.1732282557498
2024-11-22T13:35:57,505 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 after 4008ms
2024-11-22T13:35:57,506 WARN [Thread-913 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45457
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:57,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:50434 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data8]'}, localName='127.0.0.1:41767', datanodeUuid='9eb5c675-dc33-4372-83fc-9814ca496754', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741840_1022 to mirror 127.0.0.1:45457
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:57,507 WARN [Thread-913 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad.
2024-11-22T13:35:57,507 WARN [Thread-913 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741840_1022
2024-11-22T13:35:57,507 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:50434 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-22T13:35:57,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:50434 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50434 dst: /127.0.0.1:41767
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:57,508 WARN [Thread-913 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]
2024-11-22T13:35:57,509 WARN [Thread-913 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:57,510 WARN [Thread-913 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad.
2024-11-22T13:35:57,510 WARN [Thread-913 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741841_1023
2024-11-22T13:35:57,510 WARN [Thread-913 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]
2024-11-22T13:35:57,512 WARN [Thread-913 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43315
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:57,512 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:50448 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data8]'}, localName='127.0.0.1:41767', datanodeUuid='9eb5c675-dc33-4372-83fc-9814ca496754', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741842_1024 to mirror 127.0.0.1:43315
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:57,512 WARN [Thread-913 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad.
2024-11-22T13:35:57,512 WARN [Thread-913 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741842_1024
2024-11-22T13:35:57,512 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:50448 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-22T13:35:57,513 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:50448 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:41767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50448 dst: /127.0.0.1:41767
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:57,513 WARN [Thread-913 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]
2024-11-22T13:35:57,517 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:57,517 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:57,517 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:57,517 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:57,517 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:35:57,517 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282553483 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282557498
2024-11-22T13:35:57,518 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45919:45919),(127.0.0.1/127.0.0.1:33355:33355)]
2024-11-22T13:35:57,518 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 is not closed yet, will try archiving it next time
2024-11-22T13:35:57,518 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282553483 is not closed yet, will try archiving it next time
2024-11-22T13:35:57,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41767 is added to blk_1073741839_1021 (size=2431)
2024-11-22T13:35:57,550 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1])
2024-11-22T13:35:57,920 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 is not closed yet, will try archiving it next time
2024-11-22T13:35:58,243 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:59,390 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7544f40b[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:41767, datanodeUuid=9eb5c675-dc33-4372-83fc-9814ca496754, infoPort=45919, infoSecurePort=0, ipcPort=40017, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741839_1021 to 127.0.0.1:37621 got java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:59,488 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:59,519 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:59,555 WARN [ResponseProcessor for block BP-64778002-172.17.0.2-1732282537642:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-64778002-172.17.0.2-1732282537642:blk_1073741843_1025
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:59,556 WARN [DataStreamer for file /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282557498 block BP-64778002-172.17.0.2-1732282537642:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad.
2024-11-22T13:35:59,557 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:50452 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:41767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50452 dst: /127.0.0.1:41767
java.nio.channels.ClosedChannelException: null
    at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?]
    at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?]
    at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?]
    at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?]
    at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?]
    at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?]
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:59,558 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54438 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54438 dst: /127.0.0.1:38049
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:59,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@73024f00{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T13:35:59,593 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b21f544{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T13:35:59,593 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T13:35:59,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@574823ec{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T13:35:59,594 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f01ff52{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,STOPPED}
2024-11-22T13:35:59,597 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-22T13:35:59,597 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-22T13:35:59,597 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T13:35:59,597 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-64778002-172.17.0.2-1732282537642 (Datanode Uuid 9eb5c675-dc33-4372-83fc-9814ca496754) service to localhost/127.0.0.1:46035
2024-11-22T13:35:59,599 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data7/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T13:35:59,599 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data8/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T13:35:59,600 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T13:35:59,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41959 {}] regionserver.HRegion(8855): Flush requested on 2a7538bdac35a5c8f19a45c3de9ff4c0
2024-11-22T13:35:59,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-22T13:35:59,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/b69be1aab9314ffb8af70a5bd36a7b83 is 1080, key is row0002/info:/1732282555546/Put/seqid=0
2024-11-22T13:35:59,632 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41767
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
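The sequence above (another datanode's block-pool service shutting down, then a memstore flush whose HFile write immediately hits firstBadLink / Connection refused errors) is the core scenario of TestLogRolling-testLogRollOnDatanodeDeath: datanodes are removed while clients keep writing. A rough, self-contained way to provoke the same HDFS-level behaviour outside HBase is sketched below, using MiniDFSCluster from the hadoop-hdfs test jar; this is a sketch under assumptions, not the actual test code, and the path /wal-like-file and the write sizes are made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch only: write through a pipeline, kill a pipeline member, keep writing.
    public class DatanodeDeathSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
            try {
                cluster.waitActive();
                DistributedFileSystem fs = cluster.getFileSystem();
                try (FSDataOutputStream out = fs.create(new Path("/wal-like-file"))) {
                    out.write(new byte[1024]);
                    out.hflush();               // establishes the write pipeline
                    cluster.stopDataNode(0);    // take one pipeline member away
                    out.write(new byte[1024]);  // next flush forces pipeline recovery
                    out.hflush();
                }
            } finally {
                cluster.shutdown();
            }
        }
    }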
2024-11-22T13:35:59,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54456 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741844_1027 to mirror 127.0.0.1:41767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:35:59,633 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:35:59,633 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741844_1027 2024-11-22T13:35:59,633 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54456 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:35:59,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54456 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54456 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:59,633 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]
2024-11-22T13:35:59,634 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:59,634 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad.
2024-11-22T13:35:59,634 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741845_1028
2024-11-22T13:35:59,635 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]
2024-11-22T13:35:59,636 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:59,636 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad.
2024-11-22T13:35:59,636 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741846_1029
2024-11-22T13:35:59,637 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]
2024-11-22T13:35:59,639 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43315
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:35:59,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54468 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741847_1030] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741847_1030 to mirror 127.0.0.1:43315
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:59,639 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad.
2024-11-22T13:35:59,639 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741847_1030
2024-11-22T13:35:59,639 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54468 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741847_1030] {}] datanode.BlockReceiver(316): Block 1073741847 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-22T13:35:59,639 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54468 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741847_1030] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54468 dst: /127.0.0.1:38049
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:35:59,639 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]
2024-11-22T13:35:59,640 WARN [IPC Server handler 3 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-22T13:35:59,640 WARN [IPC Server handler 3 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-22T13:35:59,640 WARN [IPC Server handler 3 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-22T13:35:59,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741848_1031 (size=10347)
2024-11-22T13:36:00,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/b69be1aab9314ffb8af70a5bd36a7b83
2024-11-22T13:36:00,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/b69be1aab9314ffb8af70a5bd36a7b83 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/b69be1aab9314ffb8af70a5bd36a7b83
2024-11-22T13:36:00,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/b69be1aab9314ffb8af70a5bd36a7b83, entries=5, sequenceid=11, filesize=10.1 K
2024-11-22T13:36:00,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 2a7538bdac35a5c8f19a45c3de9ff4c0 in 450ms, sequenceid=11, compaction requested=false
2024-11-22T13:36:00,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2a7538bdac35a5c8f19a45c3de9ff4c0:
2024-11-22T13:36:00,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41959 {}] regionserver.HRegion(8855): Flush requested on 2a7538bdac35a5c8f19a45c3de9ff4c0
2024-11-22T13:36:00,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB
2024-11-22T13:36:00,243 INFO [master:store-WAL-Roller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:00,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/9ab3156e71e94af8aee17b756ff2c24b is 1080, key is row0007/info:/1732282559610/Put/seqid=0
2024-11-22T13:36:00,252 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54498 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741849_1032] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741849_1032 to mirror 127.0.0.1:41767
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:36:00,252 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41767
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:00,253 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad.
2024-11-22T13:36:00,253 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54498 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741849_1032] {}] datanode.BlockReceiver(316): Block 1073741849 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-22T13:36:00,253 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741849_1032
2024-11-22T13:36:00,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54498 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741849_1032] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54498 dst: /127.0.0.1:38049
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:36:00,253 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]
2024-11-22T13:36:00,255 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:00,255 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad.
2024-11-22T13:36:00,255 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741850_1033
2024-11-22T13:36:00,256 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]
2024-11-22T13:36:00,257 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:00,257 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad.
2024-11-22T13:36:00,257 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741851_1034
2024-11-22T13:36:00,258 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]
2024-11-22T13:36:00,259 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:00,260 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad.
2024-11-22T13:36:00,260 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741852_1035
2024-11-22T13:36:00,260 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]
2024-11-22T13:36:00,261 WARN [IPC Server handler 0 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-22T13:36:00,261 WARN [IPC Server handler 0 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-22T13:36:00,261 WARN [IPC Server handler 0 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-22T13:36:00,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741853_1036 (size=12506)
2024-11-22T13:36:00,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/9ab3156e71e94af8aee17b756ff2c24b
2024-11-22T13:36:00,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/9ab3156e71e94af8aee17b756ff2c24b as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9ab3156e71e94af8aee17b756ff2c24b
2024-11-22T13:36:00,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9ab3156e71e94af8aee17b756ff2c24b, entries=7, sequenceid=24, filesize=12.2 K
2024-11-22T13:36:00,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 2a7538bdac35a5c8f19a45c3de9ff4c0 in 437ms, sequenceid=24, compaction requested=false
2024-11-22T13:36:00,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2a7538bdac35a5c8f19a45c3de9ff4c0:
2024-11-22T13:36:00,680 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K
2024-11-22T13:36:00,680 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-22T13:36:00,680 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9ab3156e71e94af8aee17b756ff2c24b because midkey is the same as first or last row
2024-11-22T13:36:01,489 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,519 WARN [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]
2024-11-22T13:36:01,519 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580):
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,520 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C41959%2C1732282540115:(num 1732282557498) roll requested
2024-11-22T13:36:01,521 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41959%2C1732282540115.1732282561520
2024-11-22T13:36:01,527 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,528 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad.
2024-11-22T13:36:01,528 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741854_1037
2024-11-22T13:36:01,529 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]
2024-11-22T13:36:01,535 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41767
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,534 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54516 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741855_1038 to mirror 127.0.0.1:41767
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:36:01,535 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad.
2024-11-22T13:36:01,535 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741855_1038
2024-11-22T13:36:01,535 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54516 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-22T13:36:01,535 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54516 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54516 dst: /127.0.0.1:38049
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:36:01,536 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]
2024-11-22T13:36:01,538 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45457
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,538 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54528 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741856_1039 to mirror 127.0.0.1:45457
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:36:01,538 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad.
2024-11-22T13:36:01,538 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54528 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-22T13:36:01,538 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741856_1039
2024-11-22T13:36:01,538 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54528 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54528 dst: /127.0.0.1:38049
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:36:01,539 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]
2024-11-22T13:36:01,541 WARN [Thread-939 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040
java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37621
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54544 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741857_1040] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741857_1040 to mirror 127.0.0.1:37621
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:36:01,541 WARN [Thread-939 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad.
2024-11-22T13:36:01,541 WARN [Thread-939 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741857_1040
2024-11-22T13:36:01,541 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54544 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741857_1040] {}] datanode.BlockReceiver(316): Block 1073741857 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-22T13:36:01,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54544 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741857_1040] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54544 dst: /127.0.0.1:38049
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:36:01,542 WARN [Thread-939 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]
2024-11-22T13:36:01,542 WARN [IPC Server handler 1 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-22T13:36:01,542 WARN [IPC Server handler 1 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-22T13:36:01,543 WARN [IPC Server handler 1 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-22T13:36:01,545 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:36:01,545 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:36:01,545 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:36:01,545 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:36:01,545 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:36:01,546 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282557498 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282561520
2024-11-22T13:36:01,546 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33355:33355)]
2024-11-22T13:36:01,547 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 is not closed yet, will try archiving it next time
2024-11-22T13:36:01,547 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282557498 is not closed yet, will try archiving it next time
2024-11-22T13:36:01,547 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282553483 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs/e025332d312f%2C41959%2C1732282540115.1732282553483
2024-11-22T13:36:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741843_1026 (size=25992)
2024-11-22T13:36:01,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41959 {}] regionserver.HRegion(8855): Flush requested on 2a7538bdac35a5c8f19a45c3de9ff4c0
2024-11-22T13:36:01,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-22T13:36:01,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/9d22b66ca78d4915b86d8f3f32974d1f is 1079, key is tmprow/info:/1732282561677/Put/seqid=0
2024-11-22T13:36:01,688 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,688 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad.
2024-11-22T13:36:01,688 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741859_1042
2024-11-22T13:36:01,689 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]
2024-11-22T13:36:01,691 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,691 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad.
2024-11-22T13:36:01,691 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741860_1043
2024-11-22T13:36:01,691 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]
2024-11-22T13:36:01,693 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044
java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:01,693 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad.
2024-11-22T13:36:01,693 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741861_1044 2024-11-22T13:36:01,694 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK] 2024-11-22T13:36:01,697 WARN [Thread-946 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43315 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:01,697 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54566 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741862_1045 to mirror 127.0.0.1:43315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:01,697 WARN [Thread-946 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 
2024-11-22T13:36:01,697 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54566 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:01,697 WARN [Thread-946 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741862_1045 2024-11-22T13:36:01,697 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:54566 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54566 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
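The Thread-946 records above repeat one pattern: the client tries to open a block output stream, gets Connection refused (or a firstBadLink ack when the first datanode cannot reach its mirror), abandons the block, excludes the offending datanode, and asks for a fresh pipeline. A simplified, hypothetical sketch of that exclude-and-retry loop (illustrative only, not the actual org.apache.hadoop.hdfs.DataStreamer logic):

    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class ExcludeAndRetry {
        // Pretend connect: only this node accepts connections (mirrors 127.0.0.1:38049 in the log).
        static final String ONLY_LIVE_NODE = "127.0.0.1:38049";

        static boolean connect(String node) {
            return ONLY_LIVE_NODE.equals(node);   // everything else behaves like "Connection refused"
        }

        public static void main(String[] args) {
            List<String> allNodes = List.of("127.0.0.1:37621", "127.0.0.1:41767",
                                            "127.0.0.1:45457", "127.0.0.1:43315", ONLY_LIVE_NODE);
            Set<String> excluded = new LinkedHashSet<>();
            String chosen = null;
            for (String candidate : allNodes) {           // each iteration stands in for one fresh pipeline attempt
                if (excluded.contains(candidate)) continue;
                if (connect(candidate)) { chosen = candidate; break; }
                System.out.println("Abandoning block, excluding datanode " + candidate);  // like DataStreamer(1850)/(1857)
                excluded.add(candidate);
            }
            System.out.println(chosen != null
                    ? "Pipeline established with " + chosen
                    : "All datanodes are bad. Aborting...");
        }
    }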
2024-11-22T13:36:01,698 WARN [Thread-946 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] 2024-11-22T13:36:01,699 WARN [IPC Server handler 2 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T13:36:01,699 WARN [IPC Server handler 2 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T13:36:01,699 WARN [IPC Server handler 2 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T13:36:01,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741863_1046 (size=6027) 2024-11-22T13:36:01,949 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 is not closed yet, will try archiving it next time 2024-11-22T13:36:02,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/9d22b66ca78d4915b86d8f3f32974d1f 2024-11-22T13:36:02,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/9d22b66ca78d4915b86d8f3f32974d1f as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9d22b66ca78d4915b86d8f3f32974d1f 2024-11-22T13:36:02,119 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9d22b66ca78d4915b86d8f3f32974d1f, entries=1, sequenceid=34, filesize=5.9 K 2024-11-22T13:36:02,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 2a7538bdac35a5c8f19a45c3de9ff4c0 in 441ms, 
sequenceid=34, compaction requested=true 2024-11-22T13:36:02,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2a7538bdac35a5c8f19a45c3de9ff4c0: 2024-11-22T13:36:02,121 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-22T13:36:02,121 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:02,121 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9ab3156e71e94af8aee17b756ff2c24b because midkey is the same as first or last row 2024-11-22T13:36:02,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2a7538bdac35a5c8f19a45c3de9ff4c0:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:36:02,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:36:02,121 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:36:02,123 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:36:02,123 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HStore(1541): 2a7538bdac35a5c8f19a45c3de9ff4c0/info is initiating minor compaction (all files) 2024-11-22T13:36:02,123 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2a7538bdac35a5c8f19a45c3de9ff4c0/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 
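The split checks a few records above apply two gates: the summed store size (28.2 K) is over the 16.0 K check size, but the candidate split point is rejected because the midkey equals the first or last row, so the region cannot be split. A small illustrative sketch of that two-step decision (made-up names and keys, not HBase's split-policy API):

    public class SplitDecision {
        // Gate 1: size check, mirroring "sumSize=28.2 K, sizeToCheck=16.0 K".
        static boolean bigEnough(double sumSizeKb, double sizeToCheckKb) {
            return sumSizeKb > sizeToCheckKb;
        }

        // Gate 2: a usable split point must differ from both the first and the last row,
        // otherwise one daughter region would be empty ("midkey is the same as first or last row").
        static boolean usableSplitPoint(String firstRow, String midKey, String lastRow) {
            return !midKey.equals(firstRow) && !midKey.equals(lastRow);
        }

        public static void main(String[] args) {
            boolean sizeOk = bigEnough(28.2, 16.0);
            boolean pointOk = usableSplitPoint("row0002", "row0002", "tmprow");  // hypothetical keys for illustration
            System.out.println(sizeOk && pointOk ? "split" : "cannot split");
        }
    }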
2024-11-22T13:36:02,123 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/b69be1aab9314ffb8af70a5bd36a7b83, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9ab3156e71e94af8aee17b756ff2c24b, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9d22b66ca78d4915b86d8f3f32974d1f] into tmpdir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp, totalSize=28.2 K 2024-11-22T13:36:02,124 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.Compactor(225): Compacting b69be1aab9314ffb8af70a5bd36a7b83, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732282555546 2024-11-22T13:36:02,124 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9ab3156e71e94af8aee17b756ff2c24b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732282559610 2024-11-22T13:36:02,125 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9d22b66ca78d4915b86d8f3f32974d1f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732282561677 2024-11-22T13:36:02,138 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2a7538bdac35a5c8f19a45c3de9ff4c0#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:36:02,139 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/fe8096be53854675ad5e78658dc1c1fc is 1080, key is row0002/info:/1732282555546/Put/seqid=0 2024-11-22T13:36:02,141 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
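The three candidates listed above account for the logged totals: 10.1 K + 12.2 K + 5.9 K = 28.2 K, which matches totalSize=28.2 K and, within rounding, the 28880 bytes reported by the selection. A tiny bookkeeping sketch (illustrative only):

    public class CompactionSelectionSize {
        public static void main(String[] args) {
            // Sizes as logged for the three candidate HFiles, in KiB.
            double[] candidateKib = {10.1, 12.2, 5.9};
            double totalKib = 0;
            for (double k : candidateKib) totalKib += k;
            System.out.printf("totalSize=%.1f K%n", totalKib);                      // 28.2 K, as in the Compactor records
            System.out.printf("selected size in bytes ~ %.0f%n", totalKib * 1024);  // ~28877, close to the logged 28880 (KiB figures are rounded)
            // The minor compaction rewrites all three into one ~17.6 K file (fe8096be...),
            // so the store shrinks from ~28.2 K to ~17.6 K once the old files are archived.
        }
    }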
2024-11-22T13:36:02,141 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:36:02,141 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741864_1047 2024-11-22T13:36:02,142 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] 2024-11-22T13:36:02,143 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:02,143 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad. 2024-11-22T13:36:02,143 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741865_1048 2024-11-22T13:36:02,144 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK] 2024-11-22T13:36:02,145 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:02,145 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:02,145 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741866_1049 2024-11-22T13:36:02,146 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:02,148 WARN [Thread-953 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45457 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:02,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35970 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741867_1050 to mirror 127.0.0.1:45457 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:02,148 WARN [Thread-953 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad. 2024-11-22T13:36:02,148 WARN [Thread-953 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741867_1050 2024-11-22T13:36:02,148 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35970 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:02,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35970 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35970 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:02,149 WARN [Thread-953 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK] 2024-11-22T13:36:02,149 WARN [IPC Server handler 1 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T13:36:02,149 WARN [IPC Server handler 1 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T13:36:02,149 WARN [IPC Server handler 1 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T13:36:02,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741868_1051 (size=17994) 2024-11-22T13:36:02,244 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:02,568 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/fe8096be53854675ad5e78658dc1c1fc as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc 2024-11-22T13:36:02,578 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2a7538bdac35a5c8f19a45c3de9ff4c0/info of 2a7538bdac35a5c8f19a45c3de9ff4c0 into fe8096be53854675ad5e78658dc1c1fc(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
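The IPC-server warnings above reflect the same shortage seen on the client side: the write asks for replication=2, but only one datanode (127.0.0.1:38049) is still reachable, so placement falls one replica short and each new block lands on that single node, as the addStoredBlock records show. A minimal sketch of that shortfall check (hypothetical names, not the BlockPlacementPolicyDefault API):

    import java.util.List;

    public class PlacementShortfall {
        // Choose up to 'replication' targets from the reachable nodes and report any shortfall.
        static List<String> choose(List<String> reachable, int replication) {
            List<String> chosen = reachable.subList(0, Math.min(replication, reachable.size()));
            int missing = replication - chosen.size();
            if (missing > 0) {
                System.out.println("Failed to place enough replicas, still in need of "
                        + missing + " to reach " + replication);
            }
            return chosen;
        }

        public static void main(String[] args) {
            // Only one datanode is still reachable in the test cluster at this point.
            List<String> reachable = List.of("127.0.0.1:38049");
            System.out.println("addStoredBlock targets: " + choose(reachable, 2));
        }
    }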
2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2a7538bdac35a5c8f19a45c3de9ff4c0: 2024-11-22T13:36:02,578 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0., storeName=2a7538bdac35a5c8f19a45c3de9ff4c0/info, priority=13, startTime=1732282562121; duration=0sec 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc because midkey is the same as first or last row 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc because midkey is the same as first or last row 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc because midkey is the same as first or last row 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:36:02,578 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2a7538bdac35a5c8f19a45c3de9ff4c0:info 2024-11-22T13:36:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41959 {}] regionserver.HRegion(8855): Flush requested on 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:36:03,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T13:36:03,118 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/f2688b18eb7949e6aacdd3f73e7e6fc3 is 1079, key is tmprow/info:/1732282563108/Put/seqid=0 2024-11-22T13:36:03,120 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:03,120 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad. 2024-11-22T13:36:03,120 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741869_1052 2024-11-22T13:36:03,121 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK] 2024-11-22T13:36:03,122 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:03,122 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:03,122 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741870_1053 2024-11-22T13:36:03,123 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:03,126 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37621 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:03,126 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35988 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741871_1054 to mirror 127.0.0.1:37621 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:03,126 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad. 2024-11-22T13:36:03,126 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741871_1054 2024-11-22T13:36:03,126 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35988 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:03,126 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35988 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35988 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:03,127 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK] 2024-11-22T13:36:03,129 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43315 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:03,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35996 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741872_1055 to mirror 127.0.0.1:43315 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:03,129 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:36:03,129 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741872_1055 2024-11-22T13:36:03,129 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35996 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:03,129 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:35996 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35996 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:03,130 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] 2024-11-22T13:36:03,131 WARN [IPC Server handler 2 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T13:36:03,131 WARN [IPC Server handler 2 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T13:36:03,131 WARN [IPC Server handler 2 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T13:36:03,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741873_1056 (size=6027) 2024-11-22T13:36:03,136 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@54fbe9d6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741848_1031 to 127.0.0.1:41767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:03,136 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741853_1036 to 127.0.0.1:41767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:03,490 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:03,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/f2688b18eb7949e6aacdd3f73e7e6fc3 2024-11-22T13:36:03,547 WARN [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]] 2024-11-22T13:36:03,547 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:03,547 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C41959%2C1732282540115:(num 1732282561520) roll requested 2024-11-22T13:36:03,548 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41959%2C1732282540115.1732282563547 2024-11-22T13:36:03,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/f2688b18eb7949e6aacdd3f73e7e6fc3 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/f2688b18eb7949e6aacdd3f73e7e6fc3 2024-11-22T13:36:03,550 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:03,550 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad. 2024-11-22T13:36:03,550 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741874_1057 2024-11-22T13:36:03,551 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK] 2024-11-22T13:36:03,552 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:03,552 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK], DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:36:03,552 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741875_1058 2024-11-22T13:36:03,553 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] 2024-11-22T13:36:03,554 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:03,554 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad. 
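The roll requested above is the WAL's reaction to the degraded pipeline: the current file has 1 replica where at least 2 are expected, so the writer is closed and a new WAL file (suffix 1732282563547) is opened; the "Rolled WAL ... with entries=15" record below confirms it. A compact illustrative sketch of that check-then-roll rule (hypothetical names, not the FSHLog implementation):

    public class LowReplicationRoll {
        // Request a roll when the live pipeline has fewer replicas than the configured minimum.
        static boolean shouldRoll(int currentReplicas, int minReplicas) {
            if (currentReplicas < minReplicas) {
                System.out.println("HDFS pipeline error detected. Found " + currentReplicas
                        + " replicas but expecting no less than " + minReplicas
                        + " replicas. Requesting close of WAL.");
                return true;
            }
            return false;
        }

        public static void main(String[] args) {
            if (shouldRoll(1, 2)) {
                // Rolling swaps in a fresh WAL file so new edits get a new (hopefully healthier) pipeline.
                System.out.println("roll requested -> new WAL file opened");
            }
        }
    }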
2024-11-22T13:36:03,554 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741876_1059 2024-11-22T13:36:03,555 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK] 2024-11-22T13:36:03,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/f2688b18eb7949e6aacdd3f73e7e6fc3, entries=1, sequenceid=45, filesize=5.9 K 2024-11-22T13:36:03,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 2a7538bdac35a5c8f19a45c3de9ff4c0 in 445ms, sequenceid=45, compaction requested=false 2024-11-22T13:36:03,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2a7538bdac35a5c8f19a45c3de9ff4c0: 2024-11-22T13:36:03,556 WARN [Thread-968 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:03,556 WARN [Thread-968 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 
2024-11-22T13:36:03,556 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-22T13:36:03,556 WARN [Thread-968 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741877_1060 2024-11-22T13:36:03,556 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:03,556 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc because midkey is the same as first or last row 2024-11-22T13:36:03,557 WARN [Thread-968 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:03,557 WARN [IPC Server handler 4 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T13:36:03,557 WARN [IPC Server handler 4 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T13:36:03,558 WARN [IPC Server handler 4 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T13:36:03,560 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:03,560 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:03,560 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:03,560 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:03,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:03,561 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282561520 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282563547 2024-11-22T13:36:03,561 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33355:33355)] 2024-11-22T13:36:03,561 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 is not closed yet, will try archiving it next time 2024-11-22T13:36:03,561 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282561520 is not closed yet, will try archiving it next time 2024-11-22T13:36:03,562 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282557498 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs/e025332d312f%2C41959%2C1732282540115.1732282557498 2024-11-22T13:36:03,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741858_1041 (size=13591) 2024-11-22T13:36:03,963 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 is not closed yet, will try archiving it next time 2024-11-22T13:36:04,138 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741843_1026 to 127.0.0.1:43315 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:04,138 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@54fbe9d6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741863_1046 to 127.0.0.1:37621 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:04,244 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:04,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41959 {}] regionserver.HRegion(8855): Flush requested on 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:36:04,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T13:36:04,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/4d359dbce5af415d89868f838d7f0f06 is 1079, key is tmprow/info:/1732282564554/Put/seqid=0 2024-11-22T13:36:04,570 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:04,571 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad. 
2024-11-22T13:36:04,571 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741879_1062 2024-11-22T13:36:04,571 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK] 2024-11-22T13:36:04,573 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:04,573 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:36:04,573 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741880_1063 2024-11-22T13:36:04,574 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] 2024-11-22T13:36:04,577 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41767 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:04,577 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:36026 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741881_1064 to mirror 127.0.0.1:41767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:04,577 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:04,577 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741881_1064 2024-11-22T13:36:04,577 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:36026 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:04,577 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:36026 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36026 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:04,578 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:04,580 WARN [Thread-972 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:04,580 WARN [Thread-972 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad. 
2024-11-22T13:36:04,580 WARN [Thread-972 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741882_1065 2024-11-22T13:36:04,580 WARN [Thread-972 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK] 2024-11-22T13:36:04,581 WARN [IPC Server handler 4 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T13:36:04,581 WARN [IPC Server handler 4 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T13:36:04,581 WARN [IPC Server handler 4 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T13:36:04,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741883_1066 (size=6027) 2024-11-22T13:36:04,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/4d359dbce5af415d89868f838d7f0f06 2024-11-22T13:36:05,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/4d359dbce5af415d89868f838d7f0f06 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/4d359dbce5af415d89868f838d7f0f06 2024-11-22T13:36:05,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/4d359dbce5af415d89868f838d7f0f06, entries=1, sequenceid=55, filesize=5.9 K 2024-11-22T13:36:05,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 2a7538bdac35a5c8f19a45c3de9ff4c0 in 452ms, sequenceid=55, compaction requested=true 2024-11-22T13:36:05,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
2a7538bdac35a5c8f19a45c3de9ff4c0: 2024-11-22T13:36:05,010 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-22T13:36:05,010 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:05,010 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc because midkey is the same as first or last row 2024-11-22T13:36:05,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2a7538bdac35a5c8f19a45c3de9ff4c0:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:36:05,010 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:36:05,010 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:36:05,012 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:36:05,012 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HStore(1541): 2a7538bdac35a5c8f19a45c3de9ff4c0/info is initiating minor compaction (all files) 2024-11-22T13:36:05,012 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2a7538bdac35a5c8f19a45c3de9ff4c0/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 
2024-11-22T13:36:05,012 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/f2688b18eb7949e6aacdd3f73e7e6fc3, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/4d359dbce5af415d89868f838d7f0f06] into tmpdir=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp, totalSize=29.3 K 2024-11-22T13:36:05,012 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.Compactor(225): Compacting fe8096be53854675ad5e78658dc1c1fc, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732282555546 2024-11-22T13:36:05,013 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.Compactor(225): Compacting f2688b18eb7949e6aacdd3f73e7e6fc3, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732282563108 2024-11-22T13:36:05,014 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4d359dbce5af415d89868f838d7f0f06, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732282564554 2024-11-22T13:36:05,028 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2a7538bdac35a5c8f19a45c3de9ff4c0#info#compaction#24 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:36:05,029 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/be688046d4904bb6bcd4a27129fb90a0 is 1080, key is row0002/info:/1732282555546/Put/seqid=0 2024-11-22T13:36:05,030 WARN [Thread-977 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:05,031 WARN [Thread-977 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:05,031 WARN [Thread-977 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741884_1067 2024-11-22T13:36:05,031 WARN [Thread-977 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:05,032 WARN [Thread-977 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:05,032 WARN [Thread-977 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]) is bad. 2024-11-22T13:36:05,032 WARN [Thread-977 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741885_1068 2024-11-22T13:36:05,033 WARN [Thread-977 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43315,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK] 2024-11-22T13:36:05,034 WARN [Thread-977 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:05,034 WARN [Thread-977 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad. 2024-11-22T13:36:05,034 WARN [Thread-977 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741886_1069 2024-11-22T13:36:05,035 WARN [Thread-977 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK] 2024-11-22T13:36:05,037 WARN [Thread-977 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45457 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:05,037 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:36048 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741887_1070 to mirror 127.0.0.1:45457 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:05,037 WARN [Thread-977 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad. 2024-11-22T13:36:05,037 WARN [Thread-977 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741887_1070 2024-11-22T13:36:05,037 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:36048 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:05,037 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:36048 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36048 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:05,037 WARN [Thread-977 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK] 2024-11-22T13:36:05,038 WARN [IPC Server handler 4 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T13:36:05,038 WARN [IPC Server handler 4 on default port 46035 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T13:36:05,038 WARN [IPC Server handler 4 on default port 46035 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T13:36:05,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741888_1071 (size=18097) 2024-11-22T13:36:05,455 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/be688046d4904bb6bcd4a27129fb90a0 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/be688046d4904bb6bcd4a27129fb90a0 2024-11-22T13:36:05,463 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 2a7538bdac35a5c8f19a45c3de9ff4c0/info of 2a7538bdac35a5c8f19a45c3de9ff4c0 into be688046d4904bb6bcd4a27129fb90a0(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2a7538bdac35a5c8f19a45c3de9ff4c0: 2024-11-22T13:36:05,463 INFO [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0., storeName=2a7538bdac35a5c8f19a45c3de9ff4c0/info, priority=13, startTime=1732282565010; duration=0sec 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/be688046d4904bb6bcd4a27129fb90a0 because midkey is the same as first or last row 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/be688046d4904bb6bcd4a27129fb90a0 because midkey is the same as first or last row 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/be688046d4904bb6bcd4a27129fb90a0 because midkey is the same as first or last row 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:36:05,463 DEBUG [RS:0;e025332d312f:41959-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2a7538bdac35a5c8f19a45c3de9ff4c0:info 2024-11-22T13:36:05,490 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:05,562 WARN [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-22T13:36:05,562 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:05,590 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:05,594 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:36:05,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:36:05,595 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:36:05,595 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:36:05,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@179dab5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:36:05,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55126aa0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:36:05,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@593e8d90{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/java.io.tmpdir/jetty-localhost-40359-hadoop-hdfs-3_4_1-tests_jar-_-any-5351664415590069260/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:05,691 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@213456fa{HTTP/1.1, (http/1.1)}{localhost:40359} 2024-11-22T13:36:05,691 INFO [Time-limited test {}] server.Server(415): Started @134871ms 2024-11-22T13:36:05,692 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:36:06,132 WARN [Thread-996 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T13:36:06,135 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@54fbe9d6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741868_1051 to 127.0.0.1:37621 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:06,135 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741873_1056 to 127.0.0.1:41767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:06,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xffc024f09ae0ae7 with lease ID 0xfb4f9c7da85b9fcf: from storage DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f node DatanodeRegistration(127.0.0.1:38567, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=46073, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:06,140 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xffc024f09ae0ae7 with lease ID 0xfb4f9c7da85b9fcf: from storage DS-0803e8e7-14c4-4860-865f-3228e2024d23 node DatanodeRegistration(127.0.0.1:38567, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=46073, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T13:36:06,245 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:07,138 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@54fbe9d6[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741883_1066 to 127.0.0.1:37621 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:07,138 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741858_1041 to 127.0.0.1:37621 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:07,491 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:07,562 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:08,245 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:09,135 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5a3c1d39[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38049, datanodeUuid=3bc36fb6-793d-4aae-a82e-3c85098b9a2b, infoPort=33355, infoSecurePort=0, ipcPort=40033, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741888_1071 to 127.0.0.1:37621 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:09,491 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:09,563 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
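The recurring "All datanodes [...] are bad. Aborting..." IOException is the HDFS DataStreamer giving up on pipeline recovery once every DataNode still in the write pipeline has failed. Whether the client first tries to swap a replacement DataNode into the pipeline is governed by the dfs.client.block.write.replace-datanode-on-failure.* settings; the sketch below only shows where those knobs live and is not a claim about what this test configures.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class PipelineFailureConfig {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Ask the client to replace a failed DataNode in the write pipeline when possible.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT replaces only for sufficiently large pipelines; ALWAYS and NEVER are the alternatives.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // If no replacement can be found, keep writing to the surviving nodes instead of aborting.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        // NameNode address as it appears in this log.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46035"), conf)) {
            System.out.println("Connected to " + fs.getUri());
        }
    }
}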
2024-11-22T13:36:09,911 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T13:36:10,246 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:10,481 ERROR [FSHLog-0-hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData-prefix:e025332d312f,35487,1732282539938 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:10,481 WARN [FSHLog-0-hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData-prefix:e025332d312f,35487,1732282539938 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:10,481 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C35487%2C1732282539938:(num 1732282540263) roll requested 2024-11-22T13:36:10,482 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C35487%2C1732282539938.1732282570482 2024-11-22T13:36:10,488 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:10,488 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK], DatanodeInfoWithStorage[127.0.0.1:38567,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK]) is bad. 2024-11-22T13:36:10,488 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741889_1072 2024-11-22T13:36:10,489 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37621,DS-72b2ced2-faef-46e8-89ef-bae5da94d093,DISK] 2024-11-22T13:36:10,490 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1073 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:10,490 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741890_1073 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]) is bad. 
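The sequence just above (exception in createBlockOutputStream, abandon the block, exclude the DataNode) is the HDFS client rebuilding its write pipeline while the master's WAL roller asks for a fresh log file. At its core a WAL roll closes the writer that sits on the broken pipeline and opens a new output stream on a new file. The helper below is a deliberately simplified, hypothetical stand-in for that idea using plain FileSystem calls; it is not HBase's FSHLog implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical, simplified stand-in for a WAL roll: if flushing the current
// file fails (for example "All datanodes ... are bad"), abandon it and start a new one.
public class RollingWriter {
    private final FileSystem fs;
    private FSDataOutputStream out;
    private int generation;

    RollingWriter(FileSystem fs) throws IOException {
        this.fs = fs;
        roll();
    }

    void roll() throws IOException {
        if (out != null) {
            try { out.close(); } catch (IOException e) { /* old pipeline may already be dead */ }
        }
        out = fs.create(new Path("/tmp/wal-sketch/log." + (generation++)));
    }

    void append(byte[] record) throws IOException {
        try {
            out.write(record);
            out.hflush();   // push the edit through the DataNode pipeline
        } catch (IOException pipelineDead) {
            roll();         // new file, new pipeline
            out.write(record);
            out.hflush();
        }
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        RollingWriter writer = new RollingWriter(fs);
        writer.append("edit-1\n".getBytes());
    }
}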
2024-11-22T13:36:10,490 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741890_1073 2024-11-22T13:36:10,491 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK] 2024-11-22T13:36:10,492 WARN [Thread-1016 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:10,492 WARN [Thread-1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741891_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:38567,DS-3e0c77e8-2301-4a5f-b319-a97693da0d0f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:10,492 WARN [Thread-1016 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741891_1074 2024-11-22T13:36:10,493 WARN [Thread-1016 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:10,497 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:10,497 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:10,498 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:10,498 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:10,498 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:10,498 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282570482 2024-11-22T13:36:10,499 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:10,499 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:10,499 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 2024-11-22T13:36:10,499 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33355:33355),(127.0.0.1/127.0.0.1:46073:46073)] 2024-11-22T13:36:10,499 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 is not closed yet, will try archiving it next time 2024-11-22T13:36:10,499 WARN [IPC Server handler 2 on default port 46035 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 has not been closed. Lease recovery is in progress. RecoveryId = 1076 for block blk_1073741830_1006 2024-11-22T13:36:10,500 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 after 1ms 2024-11-22T13:36:11,492 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:11,563 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:13,492 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:13,564 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:14,502 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 after 4003ms 2024-11-22T13:36:15,493 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:15,564 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:16,154 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@5c8459ce {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-64778002-172.17.0.2-1732282537642:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:45457,null,null]) java.net.ConnectException: Call From e025332d312f/172.17.0.2 to localhost:38707 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T13:36:16,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741833_1020 (size=455) 2024-11-22T13:36:16,535 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282540754 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs/e025332d312f%2C41959%2C1732282540115.1732282540754 2024-11-22T13:36:16,538 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282561520 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs/e025332d312f%2C41959%2C1732282540115.1732282561520 2024-11-22T13:36:17,493 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:17,565 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:18,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741833_1020 (size=455) 2024-11-22T13:36:19,406 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41959%2C1732282540115.1732282579406 2024-11-22T13:36:19,411 WARN [Thread-1027 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,411 WARN [Thread-1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK], DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 
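The earlier "Failed to recover lease, attempt=0 ... after 1ms" and "attempt=1 ... after 4003ms" lines are RecoverLeaseFSUtils polling the NameNode until the lease on the abandoned WAL is released (the NameNode had answered that lease recovery was still in progress for blk_1073741830_1006). A minimal sketch of that style of retry loop, built on the public DistributedFileSystem.recoverLease call and made-up timeouts, looks like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
    // Polls recoverLease until the NameNode reports the file closed, or the deadline passes.
    static boolean recoverLease(DistributedFileSystem dfs, Path file, long timeoutMs)
            throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        for (int attempt = 0; System.currentTimeMillis() < deadline; attempt++) {
            if (dfs.recoverLease(file)) {   // true once the lease is released and the file is closed
                return true;
            }
            System.out.println("Failed to recover lease, attempt=" + attempt);
            Thread.sleep(4000L);            // made-up pause between attempts
        }
        return false;
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem)
            FileSystem.get(java.net.URI.create("hdfs://localhost:46035"), conf);
        recoverLease(dfs, new Path(args[0]), 60_000L);
    }
}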
2024-11-22T13:36:19,411 WARN [Thread-1027 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741893_1077 2024-11-22T13:36:19,412 WARN [Thread-1027 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:19,420 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,420 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,421 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,421 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,421 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282563547 with entries=12, filesize=11.46 KB; new WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282579406 2024-11-22T13:36:19,422 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46073:46073),(127.0.0.1/127.0.0.1:33355:33355)] 2024-11-22T13:36:19,422 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282563547 is not closed yet, will try archiving it next time 2024-11-22T13:36:19,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741878_1061 (size=11743) 2024-11-22T13:36:19,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41959 {}] regionserver.HRegion(8855): Flush requested on 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:36:19,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T13:36:19,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/57aa947bbeb44351a7d30a9ebda7c11c is 1080, key is row0013/info:/1732282579424/Put/seqid=0 2024-11-22T13:36:19,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741895_1079 (size=9267) 2024-11-22T13:36:19,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741895_1079 (size=9267) 2024-11-22T13:36:19,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/57aa947bbeb44351a7d30a9ebda7c11c 2024-11-22T13:36:19,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/57aa947bbeb44351a7d30a9ebda7c11c as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/57aa947bbeb44351a7d30a9ebda7c11c 2024-11-22T13:36:19,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/57aa947bbeb44351a7d30a9ebda7c11c, entries=4, sequenceid=66, filesize=9.0 K 2024-11-22T13:36:19,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 2a7538bdac35a5c8f19a45c3de9ff4c0 in 28ms, sequenceid=66, compaction requested=false 2024-11-22T13:36:19,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 2a7538bdac35a5c8f19a45c3de9ff4c0: 2024-11-22T13:36:19,460 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-11-22T13:36:19,460 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:36:19,460 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/be688046d4904bb6bcd4a27129fb90a0 because midkey is the same as first or last row 2024-11-22T13:36:19,494 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,565 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-22T13:36:19,566 INFO [regionserver/e025332d312f:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T13:36:19,653 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T13:36:19,653 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:36:19,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
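A few entries back the flusher logged "Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K", yet StoreUtils refused to split because the midkey equals the first or last row. The snippet below is only a distillation of that size comparison with the numbers taken from the log, not the actual split-policy code.

public class SplitCheckSketch {
    static boolean shouldSplit(long sumStoreFileBytes, long sizeToCheckBytes) {
        return sumStoreFileBytes > sizeToCheckBytes;
    }

    public static void main(String[] args) {
        long sumSize = (long) (26.7 * 1024);   // ~26.7 K of store files, from the log
        long sizeToCheck = 16 * 1024;          // ~16.0 K threshold, from the log
        // Prints true, but the region still cannot split because the chosen midkey
        // equals the first or last row, as the StoreUtils line notes.
        System.out.println(shouldSplit(sumSize, sizeToCheck));
    }
}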
2024-11-22T13:36:19,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:19,654 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T13:36:19,655 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T13:36:19,655 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1900672053, stopped=false 2024-11-22T13:36:19,655 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e025332d312f,35487,1732282539938 2024-11-22T13:36:19,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:36:19,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:36:19,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:19,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:19,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:36:19,714 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:36:19,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:19,715 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
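The NodeDeleted events for /hbase/running are how the master announces cluster shutdown to every region server through ZooKeeper: the znode exists while the cluster is up, and its deletion fires the watchers registered on it. A self-contained sketch of watching that znode with the plain ZooKeeper client follows; the quorum string is copied from the log and everything else is illustrative.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatcher {
    public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
            // The cluster is considered "up" while /hbase/running exists.
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && "/hbase/running".equals(event.getPath())) {
                deleted.countDown();
            }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:62897", 30_000, watcher);
        zk.exists("/hbase/running", true);   // register a one-shot watch via the default watcher
        deleted.await();                     // released when the master deletes the znode at shutdown
        zk.close();
    }
}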
2024-11-22T13:36:19,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:36:19,716 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at 
java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:36:19,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:36:19,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:19,716 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:36:19,717 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e025332d312f,41959,1732282540115' ***** 2024-11-22T13:36:19,717 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T13:36:19,717 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e025332d312f,40403,1732282541385' ***** 2024-11-22T13:36:19,717 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T13:36:19,717 INFO [RS:0;e025332d312f:41959 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T13:36:19,718 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T13:36:19,718 INFO [RS:0;e025332d312f:41959 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T13:36:19,718 INFO [RS:0;e025332d312f:41959 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T13:36:19,718 INFO [RS:1;e025332d312f:40403 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T13:36:19,718 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(3091): Received CLOSE for 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:36:19,718 INFO [RS:1;e025332d312f:40403 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T13:36:19,718 INFO [RS:1;e025332d312f:40403 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T13:36:19,718 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T13:36:19,718 INFO [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(959): stopping server e025332d312f,40403,1732282541385 2024-11-22T13:36:19,718 INFO [RS:1;e025332d312f:40403 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:36:19,718 INFO [RS:1;e025332d312f:40403 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;e025332d312f:40403. 
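The STOPPING messages show both region servers reacting to HBaseTestingUtil.shutdownMiniCluster(), which the call stacks trace back to AbstractTestLogRolling.tearDown. A sketch of what such a JUnit setup/teardown pair typically looks like is below; the class and field names are illustrative, and the exact startMiniCluster overload used by this test is not visible in the log.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTeardownSketch {
    private HBaseTestingUtil util;   // illustrative field name

    @Before
    public void setUp() throws Exception {
        util = new HBaseTestingUtil();
        util.startMiniCluster();     // starts master, region server(s), DFS and ZooKeeper
    }

    @After
    public void tearDown() throws Exception {
        // Stops the HBase cluster, the mini DFS and the mini ZooKeeper started above,
        // which is what produces the shutdown cascade recorded in this log.
        util.shutdownMiniCluster();
    }
}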
2024-11-22T13:36:19,718 DEBUG [RS:1;e025332d312f:40403 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:36:19,718 DEBUG [RS:1;e025332d312f:40403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:19,718 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(959): stopping server e025332d312f,41959,1732282540115 2024-11-22T13:36:19,719 INFO [RS:0;e025332d312f:41959 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:36:19,719 INFO [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(976): stopping server e025332d312f,40403,1732282541385; all regions closed. 2024-11-22T13:36:19,719 INFO [RS:0;e025332d312f:41959 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e025332d312f:41959. 
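The doPrivileged, Subject.doAs and UserGroupInformation.doAs frames in the call stack show that each mini-cluster region server thread runs inside a Hadoop UserGroupInformation context. A minimal illustration of that pattern, with a made-up test user and group, is:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static void main(String[] args) throws Exception {
        // Test-only identity; createUserForTesting avoids needing a real login.
        UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting("rs-user", new String[] {"testgroup"});
        ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
            // Work done here sees "rs-user" as the current Hadoop user, mirroring how
            // the mini cluster wraps each region server's run() in a doAs, per the frames above.
            System.out.println(UserGroupInformation.getCurrentUser().getUserName());
            return null;
        });
    }
}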
2024-11-22T13:36:19,719 DEBUG [RS:0;e025332d312f:41959 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:36:19,719 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2a7538bdac35a5c8f19a45c3de9ff4c0, disabling compactions & flushes 2024-11-22T13:36:19,719 DEBUG [RS:0;e025332d312f:41959 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:19,719 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:36:19,719 INFO [RS:0;e025332d312f:41959 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T13:36:19,719 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:36:19,719 INFO [RS:0;e025332d312f:41959 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T13:36:19,719 INFO [RS:0;e025332d312f:41959 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T13:36:19,719 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. after waiting 0 ms 2024-11-22T13:36:19,719 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,719 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 
2024-11-22T13:36:19,719 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T13:36:19,719 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,719 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,719 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-22T13:36:19,719 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T13:36:19,719 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,719 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1325): Online Regions={2a7538bdac35a5c8f19a45c3de9ff4c0=TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T13:36:19,720 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,720 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2a7538bdac35a5c8f19a45c3de9ff4c0 2024-11-22T13:36:19,720 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:36:19,720 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:36:19,720 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:36:19,720 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:36:19,720 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:36:19,720 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,720 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-22T13:36:19,720 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,720 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 2024-11-22T13:36:19,720 ERROR [FSHLog-0-hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7-prefix:e025332d312f,41959,1732282540115.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,721 WARN [FSHLog-0-hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7-prefix:e025332d312f,41959,1732282540115.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,721 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C41959%2C1732282540115.meta:.meta(num 1732282541217) roll requested 2024-11-22T13:36:19,721 WARN [IPC Server handler 3 on default port 46035 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 has not been closed. Lease recovery is in progress. 
RecoveryId = 1080 for block blk_1073741837_1013 2024-11-22T13:36:19,721 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41959%2C1732282540115.meta.1732282579721.meta 2024-11-22T13:36:19,721 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 after 1ms 2024-11-22T13:36:19,725 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/3ac5ef8a934a42b79cbab9d68b356bda is 1080, key is row0016/info:/1732282579432/Put/seqid=0 2024-11-22T13:36:19,729 WARN [Thread-1042 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41767 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55090 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741897_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741897_1082 to mirror 127.0.0.1:41767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:19,729 WARN [Thread-1042 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:19,729 WARN [Thread-1042 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741897_1082 2024-11-22T13:36:19,729 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55090 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741897_1082] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:19,729 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55090 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741897_1082] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55090 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:19,730 WARN [Thread-1042 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:19,730 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,730 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,730 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,730 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,730 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:19,730 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282579721.meta 2024-11-22T13:36:19,731 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,731 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45457,DS-b45bda0f-803e-40b5-b549-7b8f9276a514,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,731 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta 2024-11-22T13:36:19,731 WARN [IPC Server handler 3 on default port 46035 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1084 for block blk_1073741834_1010 2024-11-22T13:36:19,731 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta after 0ms 2024-11-22T13:36:19,734 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46073:46073),(127.0.0.1/127.0.0.1:33355:33355)] 2024-11-22T13:36:19,734 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta is not closed yet, will try archiving it next time 2024-11-22T13:36:19,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741898_1083 (size=13583) 2024-11-22T13:36:19,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741898_1083 (size=13583) 2024-11-22T13:36:19,744 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/3ac5ef8a934a42b79cbab9d68b356bda 2024-11-22T13:36:19,752 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/.tmp/info/3ac5ef8a934a42b79cbab9d68b356bda as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/3ac5ef8a934a42b79cbab9d68b356bda 2024-11-22T13:36:19,756 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/info/25ec276367bf438582de324031e9033c is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0./info:regioninfo/1732282541899/Put/seqid=0 2024-11-22T13:36:19,759 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41767 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:19,759 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55114 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741899_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741899_1085 to mirror 127.0.0.1:41767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:19,759 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:19,759 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55114 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741899_1085] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:19,759 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741899_1085 2024-11-22T13:36:19,759 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55114 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741899_1085] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55114 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:19,760 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/3ac5ef8a934a42b79cbab9d68b356bda, entries=8, sequenceid=77, filesize=13.3 K 2024-11-22T13:36:19,760 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:19,761 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 2a7538bdac35a5c8f19a45c3de9ff4c0 in 42ms, sequenceid=77, compaction requested=true 2024-11-22T13:36:19,764 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/b69be1aab9314ffb8af70a5bd36a7b83, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9ab3156e71e94af8aee17b756ff2c24b, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9d22b66ca78d4915b86d8f3f32974d1f, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/f2688b18eb7949e6aacdd3f73e7e6fc3, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/4d359dbce5af415d89868f838d7f0f06] to archive 2024-11-22T13:36:19,766 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T13:36:19,769 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/b69be1aab9314ffb8af70a5bd36a7b83 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/b69be1aab9314ffb8af70a5bd36a7b83 2024-11-22T13:36:19,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741900_1086 (size=7089) 2024-11-22T13:36:19,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741900_1086 (size=7089) 2024-11-22T13:36:19,771 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9ab3156e71e94af8aee17b756ff2c24b to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9ab3156e71e94af8aee17b756ff2c24b 2024-11-22T13:36:19,773 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/fe8096be53854675ad5e78658dc1c1fc 2024-11-22T13:36:19,774 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9d22b66ca78d4915b86d8f3f32974d1f to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/9d22b66ca78d4915b86d8f3f32974d1f 2024-11-22T13:36:19,775 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/f2688b18eb7949e6aacdd3f73e7e6fc3 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/f2688b18eb7949e6aacdd3f73e7e6fc3 2024-11-22T13:36:19,777 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/4d359dbce5af415d89868f838d7f0f06 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/info/4d359dbce5af415d89868f838d7f0f06 2024-11-22T13:36:19,777 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e025332d312f:35487 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T13:36:19,778 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b69be1aab9314ffb8af70a5bd36a7b83=10347, 9ab3156e71e94af8aee17b756ff2c24b=12506, fe8096be53854675ad5e78658dc1c1fc=17994, 9d22b66ca78d4915b86d8f3f32974d1f=6027, f2688b18eb7949e6aacdd3f73e7e6fc3=6027, 4d359dbce5af415d89868f838d7f0f06=6027] 2024-11-22T13:36:19,785 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/default/TestLogRolling-testLogRollOnDatanodeDeath/2a7538bdac35a5c8f19a45c3de9ff4c0/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-11-22T13:36:19,785 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:36:19,785 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2a7538bdac35a5c8f19a45c3de9ff4c0: Waiting for close lock at 1732282579719Running coprocessor pre-close hooks at 1732282579719Disabling compacts and flushes for region at 1732282579719Disabling writes for close at 1732282579719Obtaining lock to block concurrent updates at 1732282579719Preparing flush snapshotting stores in 2a7538bdac35a5c8f19a45c3de9ff4c0 at 1732282579719Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1732282579720 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 
at 1732282579721 (+1 ms)Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0/info: creating writer at 1732282579721Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0/info: appending metadata at 1732282579724 (+3 ms)Flushing 2a7538bdac35a5c8f19a45c3de9ff4c0/info: closing flushed file at 1732282579724Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25117d80: reopening flushed file at 1732282579751 (+27 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 2a7538bdac35a5c8f19a45c3de9ff4c0 in 42ms, sequenceid=77, compaction requested=true at 1732282579761 (+10 ms)Writing region close event to WAL at 1732282579780 (+19 ms)Running coprocessor post-close hooks at 1732282579785 (+5 ms)Closed at 1732282579785 2024-11-22T13:36:19,786 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732282541521.2a7538bdac35a5c8f19a45c3de9ff4c0. 2024-11-22T13:36:19,825 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.1732282563547 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs/e025332d312f%2C41959%2C1732282540115.1732282563547 2024-11-22T13:36:19,920 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T13:36:20,120 DEBUG [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T13:36:20,171 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/info/25ec276367bf438582de324031e9033c 2024-11-22T13:36:20,199 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/ns/d1673b3432c24bd9bd5f6c40d043b855 is 43, key is default/ns:d/1732282541307/Put/seqid=0 2024-11-22T13:36:20,201 WARN [Thread-1063 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41767 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:20,201 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55142 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741901_1087] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741901_1087 to mirror 127.0.0.1:41767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:20,202 WARN [Thread-1063 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:20,202 WARN [Thread-1063 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741901_1087 2024-11-22T13:36:20,202 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55142 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741901_1087] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:20,202 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55142 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741901_1087] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55142 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:20,202 WARN [Thread-1063 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:20,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741902_1088 (size=5153) 2024-11-22T13:36:20,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741902_1088 (size=5153) 2024-11-22T13:36:20,207 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/ns/d1673b3432c24bd9bd5f6c40d043b855 2024-11-22T13:36:20,229 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/table/4ee77ee381e14cd797581c167d9d74f9 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732282541914/Put/seqid=0 2024-11-22T13:36:20,232 WARN [Thread-1070 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:41767 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:20,232 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55156 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741903_1089] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6]'}, localName='127.0.0.1:38049', datanodeUuid='3bc36fb6-793d-4aae-a82e-3c85098b9a2b', xmitsInProgress=0}:Exception transferring block BP-64778002-172.17.0.2-1732282537642:blk_1073741903_1089 to mirror 127.0.0.1:41767 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:20,232 WARN [Thread-1070 {}] hdfs.DataStreamer(1731): Error Recovery for BP-64778002-172.17.0.2-1732282537642:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38049,DS-3af42623-0692-4451-a113-46de71282c50,DISK], DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK]) is bad. 2024-11-22T13:36:20,232 WARN [Thread-1070 {}] hdfs.DataStreamer(1850): Abandoning BP-64778002-172.17.0.2-1732282537642:blk_1073741903_1089 2024-11-22T13:36:20,232 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55156 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741903_1089] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T13:36:20,232 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-527953348_22 at /127.0.0.1:55156 [Receiving block BP-64778002-172.17.0.2-1732282537642:blk_1073741903_1089] {}] datanode.DataXceiver(331): 127.0.0.1:38049:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55156 dst: /127.0.0.1:38049 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:20,233 WARN [Thread-1070 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:41767,DS-ba437e2b-2718-4e00-9d45-cb2ddbea0927,DISK] 2024-11-22T13:36:20,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741904_1090 (size=5424) 2024-11-22T13:36:20,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741904_1090 (size=5424) 2024-11-22T13:36:20,238 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/table/4ee77ee381e14cd797581c167d9d74f9 2024-11-22T13:36:20,245 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/info/25ec276367bf438582de324031e9033c as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/info/25ec276367bf438582de324031e9033c 2024-11-22T13:36:20,252 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/info/25ec276367bf438582de324031e9033c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T13:36:20,254 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/ns/d1673b3432c24bd9bd5f6c40d043b855 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/ns/d1673b3432c24bd9bd5f6c40d043b855 2024-11-22T13:36:20,260 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/ns/d1673b3432c24bd9bd5f6c40d043b855, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T13:36:20,262 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/.tmp/table/4ee77ee381e14cd797581c167d9d74f9 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/table/4ee77ee381e14cd797581c167d9d74f9 
2024-11-22T13:36:20,268 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/table/4ee77ee381e14cd797581c167d9d74f9, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T13:36:20,270 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 549ms, sequenceid=11, compaction requested=false 2024-11-22T13:36:20,275 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T13:36:20,275 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:36:20,276 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:36:20,276 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282579720Running coprocessor pre-close hooks at 1732282579720Disabling compacts and flushes for region at 1732282579720Disabling writes for close at 1732282579720Obtaining lock to block concurrent updates at 1732282579720Preparing flush snapshotting stores in 1588230740 at 1732282579720Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732282579720Flushing stores of hbase:meta,,1.1588230740 at 1732282579734 (+14 ms)Flushing 1588230740/info: creating writer at 1732282579734Flushing 1588230740/info: appending metadata at 1732282579756 (+22 ms)Flushing 1588230740/info: closing flushed file at 1732282579756Flushing 1588230740/ns: creating writer at 1732282580182 (+426 ms)Flushing 1588230740/ns: appending metadata at 1732282580198 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732282580198Flushing 1588230740/table: creating writer at 1732282580213 (+15 ms)Flushing 1588230740/table: appending metadata at 1732282580229 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732282580229Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a54253: reopening flushed file at 1732282580244 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7190dddc: reopening flushed file at 1732282580253 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a090175: reopening flushed file at 1732282580261 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 549ms, sequenceid=11, compaction requested=false at 1732282580270 (+9 ms)Writing region close event to WAL at 1732282580271 (+1 ms)Running coprocessor post-close hooks at 1732282580275 (+4 ms)Closed at 1732282580276 (+1 ms) 2024-11-22T13:36:20,276 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T13:36:20,320 INFO 
[RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(976): stopping server e025332d312f,41959,1732282540115; all regions closed. 2024-11-22T13:36:20,321 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:20,321 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:20,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:20,321 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:20,321 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:20,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741896_1081 (size=825) 2024-11-22T13:36:20,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741896_1081 (size=825) 2024-11-22T13:36:20,486 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T13:36:20,487 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T13:36:20,619 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T13:36:20,619 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T13:36:20,621 INFO [regionserver/e025332d312f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:36:21,141 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6d126609[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38567, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=46073, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741836_1012 to 127.0.0.1:41767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:21,142 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3caba681[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38567, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=46073, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741832_1008 to 127.0.0.1:41767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:21,326 INFO [master/e025332d312f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T13:36:21,326 INFO [master/e025332d312f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T13:36:21,489 INFO [regionserver/e025332d312f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:36:22,140 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6d126609[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38567, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=46073, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741826_1002 to 127.0.0.1:41767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:22,140 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3caba681[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38567, datanodeUuid=f18fa2ec-361a-4869-ab7d-fbdeafde956b, infoPort=46073, infoSecurePort=0, ipcPort=34281, storageInfo=lv=-57;cid=testClusterID;nsid=377491915;c=1732282537642):Failed to transfer BP-64778002-172.17.0.2-1732282537642:blk_1073741828_1004 to 127.0.0.1:41767 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:23,723 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 after 4003ms 2024-11-22T13:36:23,733 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta after 4002ms 2024-11-22T13:36:24,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741878_1061 (size=11743) 2024-11-22T13:36:24,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:36:24,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:36:24,720 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T13:36:24,725 DEBUG [RS:1;e025332d312f:40403 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs 2024-11-22T13:36:24,726 INFO [RS:1;e025332d312f:40403 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C40403%2C1732282541385:(num 1732282541623) 2024-11-22T13:36:24,726 DEBUG [RS:1;e025332d312f:40403 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:24,726 INFO [RS:1;e025332d312f:40403 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:36:24,726 INFO [RS:1;e025332d312f:40403 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:36:24,727 INFO [RS:1;e025332d312f:40403 {}] hbase.ChoreService(370): Chore service for: regionserver/e025332d312f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T13:36:24,727 INFO [RS:1;e025332d312f:40403 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T13:36:24,727 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T13:36:24,727 INFO [RS:1;e025332d312f:40403 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T13:36:24,727 INFO [RS:1;e025332d312f:40403 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
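Editor's note: the AbstractFSWAL(2118) error above names "hbase.wal.fshlog.wait.on.shutdown.seconds" as the knob controlling how long shutdown waits for the async WAL writer to close. A minimal sketch of raising it programmatically on a test-side configuration; the 30-second value is an illustrative assumption, not a recommendation taken from this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitSketch {
      public static Configuration withLongerWalShutdownWait() {
        Configuration conf = HBaseConfiguration.create();
        // Config key quoted verbatim in the AbstractFSWAL(2118) error above;
        // the 30s value is an assumed example, not derived from this log.
        conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
        return conf;
      }
    }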
2024-11-22T13:36:24,728 INFO [RS:1;e025332d312f:40403 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:36:24,728 INFO [RS:1;e025332d312f:40403 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40403 2024-11-22T13:36:24,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:36:24,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e025332d312f,40403,1732282541385 2024-11-22T13:36:24,787 INFO [RS:1;e025332d312f:40403 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:36:24,790 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:24,796 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e025332d312f,40403,1732282541385] 2024-11-22T13:36:24,807 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e025332d312f,40403,1732282541385 already deleted, retry=false 2024-11-22T13:36:24,807 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e025332d312f,40403,1732282541385 expired; onlineServers=1 2024-11-22T13:36:24,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:24,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:24,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:24,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:24,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:24,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:24,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:24,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:36:24,897 INFO [RS:1;e025332d312f:40403 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:36:24,897 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40403-0x10162c1db350002, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:36:24,897 INFO [RS:1;e025332d312f:40403 {}] regionserver.HRegionServer(1031): Exiting; stopping=e025332d312f,40403,1732282541385; zookeeper connection closed. 2024-11-22T13:36:24,897 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@300ad40c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@300ad40c 2024-11-22T13:36:25,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:36:25,322 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T13:36:25,325 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T13:36:25,332 DEBUG [RS:0;e025332d312f:41959 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs 2024-11-22T13:36:25,333 INFO [RS:0;e025332d312f:41959 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C41959%2C1732282540115.meta:.meta(num 1732282579721) 2024-11-22T13:36:25,334 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,334 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,334 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,334 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,335 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741894_1078 (size=14682) 2024-11-22T13:36:25,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741894_1078 (size=14682) 2024-11-22T13:36:25,339 DEBUG [RS:0;e025332d312f:41959 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs 2024-11-22T13:36:25,339 INFO [RS:0;e025332d312f:41959 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C41959%2C1732282540115:(num 1732282579406) 2024-11-22T13:36:25,339 DEBUG [RS:0;e025332d312f:41959 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:25,339 INFO [RS:0;e025332d312f:41959 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:36:25,339 INFO [RS:0;e025332d312f:41959 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:36:25,339 INFO [RS:0;e025332d312f:41959 {}] hbase.ChoreService(370): Chore service for: 
regionserver/e025332d312f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T13:36:25,339 INFO [RS:0;e025332d312f:41959 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:36:25,339 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T13:36:25,339 INFO [RS:0;e025332d312f:41959 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41959 2024-11-22T13:36:25,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:25,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:25,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:25,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:25,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:25,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:25,356 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:25,358 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:25,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:36:25,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e025332d312f,41959,1732282540115 2024-11-22T13:36:25,386 INFO [RS:0;e025332d312f:41959 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:36:25,396 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e025332d312f,41959,1732282540115] 2024-11-22T13:36:25,407 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e025332d312f,41959,1732282540115 already deleted, retry=false 2024-11-22T13:36:25,407 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e025332d312f,41959,1732282540115 expired; onlineServers=0 2024-11-22T13:36:25,407 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e025332d312f,35487,1732282539938' ***** 2024-11-22T13:36:25,407 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T13:36:25,407 INFO [M:0;e025332d312f:35487 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:36:25,407 INFO [M:0;e025332d312f:35487 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:36:25,408 DEBUG [M:0;e025332d312f:35487 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T13:36:25,408 DEBUG [M:0;e025332d312f:35487 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T13:36:25,408 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-22T13:36:25,408 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282540479 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282540479,5,FailOnTimeoutGroup] 2024-11-22T13:36:25,408 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282540479 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282540479,5,FailOnTimeoutGroup] 2024-11-22T13:36:25,408 INFO [M:0;e025332d312f:35487 {}] hbase.ChoreService(370): Chore service for: master/e025332d312f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T13:36:25,408 INFO [M:0;e025332d312f:35487 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:36:25,409 DEBUG [M:0;e025332d312f:35487 {}] master.HMaster(1795): Stopping service threads 2024-11-22T13:36:25,409 INFO [M:0;e025332d312f:35487 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T13:36:25,409 INFO [M:0;e025332d312f:35487 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:36:25,409 INFO [M:0;e025332d312f:35487 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T13:36:25,409 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. 
terminating. 2024-11-22T13:36:25,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T13:36:25,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:25,418 DEBUG [M:0;e025332d312f:35487 {}] zookeeper.ZKUtil(347): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T13:36:25,418 WARN [M:0;e025332d312f:35487 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T13:36:25,419 INFO [M:0;e025332d312f:35487 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/.lastflushedseqids 2024-11-22T13:36:25,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741905_1091 (size=130) 2024-11-22T13:36:25,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741905_1091 (size=130) 2024-11-22T13:36:25,430 INFO [M:0;e025332d312f:35487 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T13:36:25,431 INFO [M:0;e025332d312f:35487 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T13:36:25,431 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:36:25,431 INFO [M:0;e025332d312f:35487 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:25,431 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:25,431 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:36:25,431 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T13:36:25,431 INFO [M:0;e025332d312f:35487 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-22T13:36:25,450 DEBUG [M:0;e025332d312f:35487 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1aa359dd220942c9b835bfcd5204e76d is 82, key is hbase:meta,,1/info:regioninfo/1732282541253/Put/seqid=0 2024-11-22T13:36:25,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741906_1092 (size=5672) 2024-11-22T13:36:25,456 INFO [M:0;e025332d312f:35487 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1aa359dd220942c9b835bfcd5204e76d 2024-11-22T13:36:25,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741906_1092 (size=5672) 2024-11-22T13:36:25,478 DEBUG [M:0;e025332d312f:35487 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b5ecf316abe4bc6a469f4c4d308dae8 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732282541920/Put/seqid=0 2024-11-22T13:36:25,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741907_1093 (size=6255) 2024-11-22T13:36:25,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741907_1093 (size=6255) 2024-11-22T13:36:25,484 INFO [M:0;e025332d312f:35487 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b5ecf316abe4bc6a469f4c4d308dae8 2024-11-22T13:36:25,488 INFO [M:0;e025332d312f:35487 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9b5ecf316abe4bc6a469f4c4d308dae8 2024-11-22T13:36:25,497 INFO [RS:0;e025332d312f:41959 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:36:25,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:36:25,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41959-0x10162c1db350001, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:36:25,497 INFO [RS:0;e025332d312f:41959 {}] regionserver.HRegionServer(1031): Exiting; stopping=e025332d312f,41959,1732282540115; zookeeper connection closed. 
2024-11-22T13:36:25,497 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@64db250a {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@64db250a 2024-11-22T13:36:25,497 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-22T13:36:25,503 DEBUG [M:0;e025332d312f:35487 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e04b65ad670a4c15aa8b76fd35cb73d8 is 69, key is e025332d312f,40403,1732282541385/rs:state/1732282541460/Put/seqid=0 2024-11-22T13:36:25,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741908_1094 (size=5224) 2024-11-22T13:36:25,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741908_1094 (size=5224) 2024-11-22T13:36:25,508 INFO [M:0;e025332d312f:35487 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e04b65ad670a4c15aa8b76fd35cb73d8 2024-11-22T13:36:25,528 DEBUG [M:0;e025332d312f:35487 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa3c109b01ed4364863cd5e50afebe98 is 52, key is load_balancer_on/state:d/1732282541367/Put/seqid=0 2024-11-22T13:36:25,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741909_1095 (size=5056) 2024-11-22T13:36:25,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741909_1095 (size=5056) 2024-11-22T13:36:25,533 INFO [M:0;e025332d312f:35487 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa3c109b01ed4364863cd5e50afebe98 2024-11-22T13:36:25,538 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1aa359dd220942c9b835bfcd5204e76d as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1aa359dd220942c9b835bfcd5204e76d 2024-11-22T13:36:25,543 INFO [M:0;e025332d312f:35487 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1aa359dd220942c9b835bfcd5204e76d, entries=8, sequenceid=60, filesize=5.5 K 2024-11-22T13:36:25,544 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b5ecf316abe4bc6a469f4c4d308dae8 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9b5ecf316abe4bc6a469f4c4d308dae8 2024-11-22T13:36:25,551 INFO [M:0;e025332d312f:35487 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9b5ecf316abe4bc6a469f4c4d308dae8 2024-11-22T13:36:25,551 INFO [M:0;e025332d312f:35487 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9b5ecf316abe4bc6a469f4c4d308dae8, entries=6, sequenceid=60, filesize=6.1 K 2024-11-22T13:36:25,552 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e04b65ad670a4c15aa8b76fd35cb73d8 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e04b65ad670a4c15aa8b76fd35cb73d8 2024-11-22T13:36:25,557 INFO [M:0;e025332d312f:35487 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e04b65ad670a4c15aa8b76fd35cb73d8, entries=2, sequenceid=60, filesize=5.1 K 2024-11-22T13:36:25,558 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/aa3c109b01ed4364863cd5e50afebe98 as hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aa3c109b01ed4364863cd5e50afebe98 2024-11-22T13:36:25,563 INFO [M:0;e025332d312f:35487 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/aa3c109b01ed4364863cd5e50afebe98, entries=1, sequenceid=60, filesize=4.9 K 2024-11-22T13:36:25,564 INFO [M:0;e025332d312f:35487 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=60, compaction requested=false 2024-11-22T13:36:25,566 INFO [M:0;e025332d312f:35487 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
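Editor's note: the HRegionFileSystem(442) "Committing ... .tmp/... as ..." lines above show each flushed file being written under a .tmp directory and then moved into the live store directory. A hedged sketch of that write-then-rename commit pattern against a plain Hadoop FileSystem; the paths and file name are placeholders rather than the real test-data layout, and this illustrates the pattern, not HBase's actual commit code:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      // Move a flushed file from the staging (.tmp) area into the store directory,
      // so readers only ever observe fully written files.
      static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
        Path dst = new Path(storeDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
          throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/example/store/.tmp/info/flushed-hfile"); // placeholder path
        Path store = new Path("/example/store/info");                  // placeholder path
        System.out.println("Committed to " + commit(fs, tmp, store));
      }
    }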
2024-11-22T13:36:25,566 DEBUG [M:0;e025332d312f:35487 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282585431Disabling compacts and flushes for region at 1732282585431Disabling writes for close at 1732282585431Obtaining lock to block concurrent updates at 1732282585431Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732282585431Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732282585432 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732282585433 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732282585433Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732282585450 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732282585450Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732282585461 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732282585477 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732282585477Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732282585488 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732282585503 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732282585503Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732282585514 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732282585528 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732282585528Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fd0b3f6: reopening flushed file at 1732282585537 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ade4e4: reopening flushed file at 1732282585543 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25ccf9ee: reopening flushed file at 1732282585551 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5723bf02: reopening flushed file at 1732282585557 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=60, compaction requested=false at 1732282585564 (+7 ms)Writing region close event to WAL at 1732282585566 (+2 ms)Closed at 1732282585566 2024-11-22T13:36:25,566 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,566 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,566 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,566 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,566 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:25,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38567 is added to blk_1073741892_1075 (size=1045) 2024-11-22T13:36:25,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38049 is added to blk_1073741892_1075 (size=1045) 2024-11-22T13:36:25,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:25,734 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:26,160 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@3a63bc4b {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-64778002-172.17.0.2-1732282537642:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:45457,null,null]) java.net.ConnectException: Call From e025332d312f/172.17.0.2 to localhost:38707 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T13:36:26,520 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/WALs/e025332d312f,35487,1732282539938/e025332d312f%2C35487%2C1732282539938.1732282540263 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/oldWALs/e025332d312f%2C35487%2C1732282539938.1732282540263 2024-11-22T13:36:26,529 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/MasterData/oldWALs/e025332d312f%2C35487%2C1732282539938.1732282540263 to hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/oldWALs/e025332d312f%2C35487%2C1732282539938.1732282540263$masterlocalwal$ 2024-11-22T13:36:26,530 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T13:36:26,530 INFO [M:0;e025332d312f:35487 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
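Editor's note: the Close-WAL-Writer-0 stack traces above show RecoverLeaseFSUtils reaching DistributedFileSystem.isFileClosed via reflection and failing with "Filesystem closed" because the DFS client has already been shut down. As a hedged sketch of the underlying pattern (not HBase's implementation), lease recovery on a WAL is a recoverLease() call followed by polling isFileClosed() until the NameNode reports the file closed; the timeout and poll interval below are assumptions for illustration:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a WAL file, then poll until the
      // file is reported closed or the (assumed) deadline passes.
      static boolean recoverWalLease(DistributedFileSystem dfs, Path wal)
          throws IOException, InterruptedException {
        boolean closed = dfs.recoverLease(wal);
        long deadline = System.currentTimeMillis() + 60_000L;   // assumed 60s budget
        while (!closed && System.currentTimeMillis() < deadline) {
          Thread.sleep(1_000L);                                 // assumed poll interval
          // Same call the reflective RecoverLeaseFSUtils.isFileClosed probe makes;
          // in the log it throws because the DFSClient is already closed.
          closed = dfs.isFileClosed(wal);
        }
        return closed;
      }
    }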
2024-11-22T13:36:26,530 INFO [M:0;e025332d312f:35487 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35487 2024-11-22T13:36:26,530 INFO [M:0;e025332d312f:35487 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:36:26,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:36:26,676 INFO [M:0;e025332d312f:35487 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:36:26,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35487-0x10162c1db350000, quorum=127.0.0.1:62897, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:36:26,683 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@593e8d90{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:26,684 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@213456fa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:36:26,684 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:36:26,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55126aa0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:36:26,684 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@179dab5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,STOPPED} 2024-11-22T13:36:26,685 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:36:26,686 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T13:36:26,686 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-64778002-172.17.0.2-1732282537642 (Datanode Uuid f18fa2ec-361a-4869-ab7d-fbdeafde956b) service to localhost/127.0.0.1:46035 2024-11-22T13:36:26,686 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:36:26,685 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:45457,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:38707 , LocalHost:localPort e025332d312f/172.17.0.2:0. 
Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T13:36:26,686 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:38567,null,null]) java.io.IOException: No block pool offer service for bpid=BP-64778002-172.17.0.2-1732282537642 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:26,686 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45457,null,null], DatanodeInfoWithStorage[127.0.0.1:38567,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-64778002-172.17.0.2-1732282537642:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45457,null,null], DatanodeInfoWithStorage[127.0.0.1:38567,null,null]] 2024-11-22T13:36:26,686 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data3/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:26,686 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45457,null,null]) java.io.IOException: No block pool offer service for bpid=BP-64778002-172.17.0.2-1732282537642 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:26,687 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38567,null,null]) java.io.IOException: No block pool offer service for bpid=BP-64778002-172.17.0.2-1732282537642 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:26,687 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@421fdd25 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45457,null,null], DatanodeInfoWithStorage[127.0.0.1:38567,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-64778002-172.17.0.2-1732282537642:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:45457,null,null], DatanodeInfoWithStorage[127.0.0.1:38567,null,null]] 2024-11-22T13:36:26,687 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data4/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:26,687 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:36:26,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@492554e1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:26,690 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3f85c2b2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:36:26,690 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:36:26,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a9d9bc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:36:26,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@2a28e14{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,STOPPED} 2024-11-22T13:36:26,691 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T13:36:26,691 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:36:26,691 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:36:26,691 WARN [BP-64778002-172.17.0.2-1732282537642 heartbeating to localhost/127.0.0.1:46035 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-64778002-172.17.0.2-1732282537642 (Datanode Uuid 3bc36fb6-793d-4aae-a82e-3c85098b9a2b) service to localhost/127.0.0.1:46035 2024-11-22T13:36:26,692 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data5/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:26,692 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/cluster_1507c2eb-b664-e97d-1bd2-4f7d2d9966d0/data/data6/current/BP-64778002-172.17.0.2-1732282537642 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:26,692 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:36:26,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e195dbd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:36:26,698 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d9b1613{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:36:26,699 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:36:26,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20aa2ea7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:36:26,699 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b918d2a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir/,STOPPED} 2024-11-22T13:36:26,707 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T13:36:26,725 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:26,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:26,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T13:36:26,745 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=157 (was 82) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46035 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46035 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36837 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36837 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46035 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46035 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46035 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:46035 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46035 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:46035 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46035 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f3b18bf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:46035 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46035 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f3b18bf4928.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=64 (was 58) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2551 (was 2996) 2024-11-22T13:36:26,752 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=157, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=64, ProcessCount=11, AvailableMemoryMB=2550 2024-11-22T13:36:26,752 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.log.dir so I do NOT create it in target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6023de5d-900b-7683-4b01-123afbbaa91c/hadoop.tmp.dir so I do NOT create it in target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee, deleteOnExit=true 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/test.cache.data in system properties and HBase conf 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir in system properties and HBase conf 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T13:36:26,753 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T13:36:26,753 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/nfs.dump.dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/java.io.tmpdir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T13:36:26,754 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T13:36:26,766 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:36:26,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T13:36:26,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:36:26,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T13:36:26,899 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T13:36:27,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:27,171 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:36:27,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:36:27,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:36:27,172 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:36:27,173 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:27,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@140caf6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:36:27,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2835f29c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:36:27,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fc8bed8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/java.io.tmpdir/jetty-localhost-36191-hadoop-hdfs-3_4_1-tests_jar-_-any-2541964534669128562/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:36:27,266 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42aa99e7{HTTP/1.1, (http/1.1)}{localhost:36191} 2024-11-22T13:36:27,266 INFO [Time-limited test {}] server.Server(415): Started @156446ms 2024-11-22T13:36:27,278 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:36:27,528 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:27,531 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:36:27,532 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:36:27,532 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:36:27,532 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:36:27,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33cf8bc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:36:27,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60abc71f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:36:27,626 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60deb4a2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/java.io.tmpdir/jetty-localhost-46785-hadoop-hdfs-3_4_1-tests_jar-_-any-11576753693200232520/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:27,626 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a75563d{HTTP/1.1, (http/1.1)}{localhost:46785} 2024-11-22T13:36:27,627 INFO [Time-limited test {}] server.Server(415): Started @156806ms 2024-11-22T13:36:27,628 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:36:27,654 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:27,659 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:36:27,660 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:36:27,660 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:36:27,660 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:36:27,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c07ac8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:36:27,661 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@392000f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:36:27,726 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:36:27,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:27,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62169090{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/java.io.tmpdir/jetty-localhost-43229-hadoop-hdfs-3_4_1-tests_jar-_-any-15493644546823044076/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:27,754 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b2d1260{HTTP/1.1, (http/1.1)}{localhost:43229} 2024-11-22T13:36:27,754 INFO [Time-limited test {}] server.Server(415): Started @156934ms 2024-11-22T13:36:27,755 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
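[editor's note] The repeated WARN above is a shutdown race: the WAL close path asks RecoverLeaseFSUtils to recover the lease on an old WAL, that utility invokes DistributedFileSystem.isFileClosed reflectively (which is why the failure surfaces as an InvocationTargetException), and because the test's DFSClient has already been closed the underlying call fails with "Filesystem closed". Below is a minimal sketch of the underlying lease-recovery loop using the public recoverLease/isFileClosed HDFS client APIs directly; the class and method names in the sketch are made up for illustration and this is not HBase's actual RecoverLeaseFSUtils implementation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      /** Returns true once the NameNode reports the WAL file closed, false on timeout. */
      public static boolean recoverLease(Configuration conf, Path wal, long timeoutMs)
          throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true;                       // no lease concept outside HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
          // recoverLease() returns true when the previous writer's lease is released
          // and the file is closed; isFileClosed() is the cheaper follow-up poll.
          // If the client is already shut down, both throw IOException("Filesystem closed"),
          // which is exactly the cause shown in the stack traces above.
          if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
            return true;
          }
          Thread.sleep(1000);                // back off before retrying
        }
        return false;                        // caller decides whether to retry or give up
      }
    }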
2024-11-22T13:36:28,727 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:28,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:28,811 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data1/current/BP-1891882766-172.17.0.2-1732282586777/current, will proceed with Du for space computation calculation, 2024-11-22T13:36:28,811 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data2/current/BP-1891882766-172.17.0.2-1732282586777/current, will proceed with Du for space computation calculation, 2024-11-22T13:36:28,828 WARN [Thread-1169 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:36:28,830 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7af6790c567318eb with lease ID 0x3caa58f0daae69ad: Processing first storage report for DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b from datanode DatanodeRegistration(127.0.0.1:41283, datanodeUuid=b4ffa5e1-70a8-4e25-bc37-c92e218af6d5, infoPort=37929, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777) 2024-11-22T13:36:28,830 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7af6790c567318eb with lease ID 0x3caa58f0daae69ad: from storage DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b node DatanodeRegistration(127.0.0.1:41283, datanodeUuid=b4ffa5e1-70a8-4e25-bc37-c92e218af6d5, infoPort=37929, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:28,831 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7af6790c567318eb with lease ID 0x3caa58f0daae69ad: Processing first storage report for DS-4f790a47-b865-45bc-aa1c-bee1ec8fc60f from datanode DatanodeRegistration(127.0.0.1:41283, datanodeUuid=b4ffa5e1-70a8-4e25-bc37-c92e218af6d5, infoPort=37929, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777) 2024-11-22T13:36:28,831 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7af6790c567318eb with lease ID 0x3caa58f0daae69ad: from storage DS-4f790a47-b865-45bc-aa1c-bee1ec8fc60f node DatanodeRegistration(127.0.0.1:41283, datanodeUuid=b4ffa5e1-70a8-4e25-bc37-c92e218af6d5, infoPort=37929, infoSecurePort=0, ipcPort=35443, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:28,964 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data3/current/BP-1891882766-172.17.0.2-1732282586777/current, will proceed with Du for space computation calculation, 2024-11-22T13:36:28,964 WARN [Thread-1217 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data4/current/BP-1891882766-172.17.0.2-1732282586777/current, will proceed with Du for space computation calculation, 2024-11-22T13:36:28,983 WARN [Thread-1192 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:36:28,985 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42404273d8d99273 with lease ID 0x3caa58f0daae69ae: Processing first storage report for DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5 from datanode DatanodeRegistration(127.0.0.1:34223, datanodeUuid=c0dd6e3b-3203-46b9-b858-186208f131df, infoPort=43965, infoSecurePort=0, ipcPort=43983, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777) 2024-11-22T13:36:28,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42404273d8d99273 with lease ID 0x3caa58f0daae69ae: from storage DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5 node DatanodeRegistration(127.0.0.1:34223, datanodeUuid=c0dd6e3b-3203-46b9-b858-186208f131df, infoPort=43965, infoSecurePort=0, ipcPort=43983, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:28,985 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x42404273d8d99273 with lease ID 0x3caa58f0daae69ae: Processing first storage report for DS-b61a50e8-e83a-4278-a790-37468070bd81 from datanode DatanodeRegistration(127.0.0.1:34223, datanodeUuid=c0dd6e3b-3203-46b9-b858-186208f131df, infoPort=43965, infoSecurePort=0, ipcPort=43983, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777) 2024-11-22T13:36:28,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x42404273d8d99273 with lease ID 0x3caa58f0daae69ae: from storage DS-b61a50e8-e83a-4278-a790-37468070bd81 node DatanodeRegistration(127.0.0.1:34223, datanodeUuid=c0dd6e3b-3203-46b9-b858-186208f131df, infoPort=43965, infoSecurePort=0, ipcPort=43983, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:28,991 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58 2024-11-22T13:36:28,994 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/zookeeper_0, clientPort=61784, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T13:36:28,995 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61784 2024-11-22T13:36:28,995 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:36:28,999 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:36:29,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:36:29,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:36:29,009 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7 with version=8 2024-11-22T13:36:29,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase-staging 2024-11-22T13:36:29,012 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:36:29,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:36:29,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:36:29,012 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:36:29,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:36:29,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:36:29,012 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T13:36:29,012 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:36:29,013 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35061 2024-11-22T13:36:29,015 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35061 connecting to ZooKeeper ensemble=127.0.0.1:61784 2024-11-22T13:36:29,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350610x0, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:36:29,066 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35061-0x10162c29aed0000 connected 2024-11-22T13:36:29,155 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:36:29,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:36:29,162 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:36:29,163 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7, hbase.cluster.distributed=false 2024-11-22T13:36:29,165 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:36:29,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35061 2024-11-22T13:36:29,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35061 2024-11-22T13:36:29,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35061 2024-11-22T13:36:29,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35061 2024-11-22T13:36:29,167 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35061 2024-11-22T13:36:29,184 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:36:29,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:36:29,184 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:36:29,184 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:36:29,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:36:29,185 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:36:29,185 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T13:36:29,185 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:36:29,185 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43097 2024-11-22T13:36:29,187 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43097 connecting to ZooKeeper ensemble=127.0.0.1:61784 2024-11-22T13:36:29,187 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:36:29,189 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:36:29,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:430970x0, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:36:29,204 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43097-0x10162c29aed0001 connected 2024-11-22T13:36:29,204 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:36:29,204 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T13:36:29,205 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T13:36:29,206 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T13:36:29,207 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:36:29,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43097 2024-11-22T13:36:29,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43097 2024-11-22T13:36:29,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43097 2024-11-22T13:36:29,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43097 2024-11-22T13:36:29,209 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43097 2024-11-22T13:36:29,224 DEBUG [M:0;e025332d312f:35061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e025332d312f:35061 2024-11-22T13:36:29,224 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e025332d312f,35061,1732282589011 2024-11-22T13:36:29,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:36:29,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:36:29,235 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e025332d312f,35061,1732282589011 2024-11-22T13:36:29,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T13:36:29,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,246 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T13:36:29,247 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e025332d312f,35061,1732282589011 from backup master directory 2024-11-22T13:36:29,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e025332d312f,35061,1732282589011 2024-11-22T13:36:29,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:36:29,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:36:29,259 WARN [master/e025332d312f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
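[editor's note] The ZKWatcher/ZKUtil lines above show the master and region server registering watches on znodes such as /hbase/master and /hbase/backup-masters and then reacting to NodeCreated/NodeChildrenChanged events. A minimal sketch of the same pattern with the plain Apache ZooKeeper client (not HBase's internal ZKUtil); the ensemble address is per test run, 127.0.0.1:61784 in this log.

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch created = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61784", 30000, event -> {
          // The same watcher also receives connection-state events (path == null).
          if (event.getType() == Watcher.Event.EventType.NodeCreated
              && "/hbase/master".equals(event.getPath())) {
            created.countDown();     // the active master has registered itself
          }
        });
        // Set a watch on a znode that may not exist yet, as the ZKUtil lines above report.
        if (zk.exists("/hbase/master", true) != null) {
          created.countDown();       // already present, no NodeCreated event will fire
        }
        created.await(60, TimeUnit.SECONDS);
        zk.close();
      }
    }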
2024-11-22T13:36:29,260 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e025332d312f,35061,1732282589011 2024-11-22T13:36:29,268 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/hbase.id] with ID: 56ecc500-bf92-4749-9913-cfe3b92b4079 2024-11-22T13:36:29,268 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/.tmp/hbase.id 2024-11-22T13:36:29,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:36:29,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:36:29,274 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/.tmp/hbase.id]:[hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/hbase.id] 2024-11-22T13:36:29,285 INFO [master/e025332d312f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:36:29,285 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T13:36:29,287 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
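[editor's note] The FSUtils lines above describe a two-step write of the cluster ID: create the file under .tmp, then move it to hdfs://.../hbase.id. A minimal sketch of that write-then-rename pattern with the plain FileSystem API, on the assumption that the point of the temporary file is that readers never see a partially written hbase.id; the real payload is HBase's serialized ClusterId rather than raw text, and the class and method names here are made up.

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      /** Write the ID to a temporary location, then rename it into place. */
      public static void writeClusterId(FileSystem fs, Path rootDir, String id)
          throws IOException {
        Path tmp = new Path(rootDir, ".tmp/hbase.id");
        Path target = new Path(rootDir, "hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {   // overwrite any stale temp file
          out.write(id.getBytes(StandardCharsets.UTF_8));
        }
        if (!fs.rename(tmp, target)) {                          // single metadata operation on HDFS
          throw new IOException("Failed to move " + tmp + " to " + target);
        }
      }
    }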
2024-11-22T13:36:29,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:36:29,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:36:29,305 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:36:29,305 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T13:36:29,306 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:36:29,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:36:29,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:36:29,313 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store 2024-11-22T13:36:29,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:36:29,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:36:29,320 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:36:29,320 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:36:29,320 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:29,320 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:29,320 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:36:29,320 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:29,320 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
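[editor's note] The master:store region above is created internally by MasterRegion rather than through the client API, but the column-family attributes it prints map directly onto the public descriptor builders. A sketch reconstructing the logged 'info' family settings (3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks) plus the default-configured proc/rs/state families; the table name "example_store" and the class name are hypothetical.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      static TableDescriptor example() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                  // VERSIONS => '3'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8 * 1024)                             // BLOCKSIZE => 8 KB
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))   // defaults: 1 version, 64 KB blocks
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
            .build();
      }
    }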
2024-11-22T13:36:29,320 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282589320Disabling compacts and flushes for region at 1732282589320Disabling writes for close at 1732282589320Writing region close event to WAL at 1732282589320Closed at 1732282589320 2024-11-22T13:36:29,321 WARN [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/.initializing 2024-11-22T13:36:29,321 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011 2024-11-22T13:36:29,324 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C35061%2C1732282589011, suffix=, logDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011, archiveDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/oldWALs, maxLogs=10 2024-11-22T13:36:29,324 INFO [master/e025332d312f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C35061%2C1732282589011.1732282589324 2024-11-22T13:36:29,329 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 2024-11-22T13:36:29,332 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37929:37929),(127.0.0.1/127.0.0.1:43965:43965)] 2024-11-22T13:36:29,333 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:36:29,334 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:36:29,334 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,334 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,335 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,336 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T13:36:29,336 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:36:29,337 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,338 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T13:36:29,338 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:36:29,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T13:36:29,340 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:36:29,340 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,341 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T13:36:29,342 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,342 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:36:29,342 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,343 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,343 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,344 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,344 DEBUG [master/e025332d312f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,344 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T13:36:29,345 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:36:29,348 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:36:29,348 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787568, jitterRate=0.0014457553625106812}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T13:36:29,349 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732282589334Initializing all the Stores at 1732282589335 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282589335Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282589335Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282589335Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282589335Cleaning up temporary data from old regions at 1732282589344 (+9 ms)Region opened successfully at 1732282589349 (+5 ms) 2024-11-22T13:36:29,352 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T13:36:29,355 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1db98835, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:36:29,356 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T13:36:29,356 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T13:36:29,356 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T13:36:29,356 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T13:36:29,357 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T13:36:29,357 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T13:36:29,357 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T13:36:29,359 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T13:36:29,360 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T13:36:29,371 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T13:36:29,372 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T13:36:29,373 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T13:36:29,385 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T13:36:29,386 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T13:36:29,387 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T13:36:29,396 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T13:36:29,398 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T13:36:29,406 DEBUG 
[master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T13:36:29,409 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T13:36:29,477 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T13:36:29,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:36:29,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:36:29,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,575 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e025332d312f,35061,1732282589011, sessionid=0x10162c29aed0000, setting cluster-up flag (Was=false) 2024-11-22T13:36:29,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,649 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T13:36:29,654 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,35061,1732282589011 2024-11-22T13:36:29,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:29,712 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T13:36:29,717 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,35061,1732282589011 2024-11-22T13:36:29,721 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T13:36:29,724 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T13:36:29,724 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T13:36:29,724 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T13:36:29,724 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e025332d312f,35061,1732282589011 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T13:36:29,726 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:36:29,726 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:36:29,726 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:36:29,726 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:36:29,726 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e025332d312f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T13:36:29,727 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,727 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:36:29,727 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e025332d312f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T13:36:29,728 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732282619727 2024-11-22T13:36:29,728 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T13:36:29,728 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T13:36:29,728 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T13:36:29,728 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T13:36:29,728 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T13:36:29,728 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T13:36:29,728 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,728 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:29,728 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:36:29,728 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T13:36:29,729 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T13:36:29,729 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T13:36:29,729 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T13:36:29,729 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T13:36:29,729 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T13:36:29,729 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282589729,5,FailOnTimeoutGroup] 2024-11-22T13:36:29,729 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282589729,5,FailOnTimeoutGroup] 2024-11-22T13:36:29,729 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,729 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T13:36:29,730 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,730 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
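The two WARNs above come from RecoverLeaseFSUtils: while closing writers for old WALs, HBase reflectively calls DFSClient.isFileClosed, and the call fails with "java.io.IOException: Filesystem closed" because the test's DFS client for hdfs://localhost:46035 has already been shut down. Below is a minimal sketch of the underlying HDFS lease-recovery loop using only the public DistributedFileSystem API; the class name is made up, the WAL path is taken as a program argument (for example the path named in the WARN), and this is not HBase's actual RecoverLeaseFSUtils implementation.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        // args[0]: a WAL file, e.g. the path named in the WARN above.
        Path wal = new Path(args[0]);
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46035"), conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // recoverLease() asks the namenode to close the file on behalf of the dead
          // writer; isFileClosed() polls until the last block is finalized. Both throw
          // "Filesystem closed" if the DFSClient has already been shut down, which is
          // exactly what the stack trace above reports.
          boolean recovered = dfs.recoverLease(wal);
          while (!recovered && !dfs.isFileClosed(wal)) {
            Thread.sleep(1000);
            recovered = dfs.recoverLease(wal);
          }
        }
        fs.close();
      }
    }
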
2024-11-22T13:36:29,730 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,730 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T13:36:29,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:36:29,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:36:29,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:29,738 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T13:36:29,738 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7 2024-11-22T13:36:29,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:36:29,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:36:29,747 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; 
minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:36:29,749 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:36:29,751 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:36:29,751 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:36:29,752 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:36:29,753 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:36:29,753 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:36:29,754 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:36:29,755 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:36:29,755 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:36:29,756 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:36:29,757 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:36:29,757 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:29,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:36:29,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:36:29,758 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740 2024-11-22T13:36:29,759 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740 2024-11-22T13:36:29,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:36:29,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:36:29,761 DEBUG [PEWorker-1 {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:36:29,762 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:36:29,764 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:36:29,765 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842617, jitterRate=0.071443110704422}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:36:29,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732282589747Initializing all the Stores at 1732282589749 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282589749Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282589749Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282589749Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282589749Cleaning up temporary data from old regions at 1732282589760 (+11 ms)Region opened successfully at 1732282589765 (+5 ms) 2024-11-22T13:36:29,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:36:29,765 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:36:29,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:36:29,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:36:29,765 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:36:29,766 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:36:29,766 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282589765Disabling compacts and flushes for region at 1732282589765Disabling writes for close at 1732282589765Writing region close event to WAL at 1732282589766 (+1 ms)Closed at 1732282589766 2024-11-22T13:36:29,767 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:36:29,767 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T13:36:29,768 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T13:36:29,769 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:36:29,770 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T13:36:29,811 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(746): ClusterId : 56ecc500-bf92-4749-9913-cfe3b92b4079 2024-11-22T13:36:29,812 DEBUG [RS:0;e025332d312f:43097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T13:36:29,825 DEBUG [RS:0;e025332d312f:43097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T13:36:29,826 DEBUG [RS:0;e025332d312f:43097 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T13:36:29,839 DEBUG [RS:0;e025332d312f:43097 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T13:36:29,840 DEBUG [RS:0;e025332d312f:43097 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4242eef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:36:29,856 DEBUG [RS:0;e025332d312f:43097 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e025332d312f:43097 2024-11-22T13:36:29,856 INFO [RS:0;e025332d312f:43097 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T13:36:29,856 INFO [RS:0;e025332d312f:43097 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T13:36:29,856 DEBUG [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(832): About to register with Master. 
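The FSTableDescriptors and HRegion lines above print the full hbase:meta table descriptor that InitMetaProcedure just wrote: families info, ns, rep_barrier and table, ROW_INDEX_V1 block encoding, ROWCOL bloom filters, IN_MEMORY, 8 KB blocks, and the MultiRowMutationEndpoint coprocessor. The sketch below shows how a comparable descriptor could be assembled through the public client API for a hypothetical user table; the class and table names are made up, only the 'info'-style family is reproduced, and hbase:meta itself is created internally by the procedure rather than through this API. Passing the result to Admin.createTable would then create such a table.

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static TableDescriptor build() throws Exception {
        // Mirrors the settings logged for the 'info' family of hbase:meta.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setTimeToLive(HConstants.FOREVER)
            .build();
        // "meta_like_example" is a made-up table name for illustration only.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("meta_like_example"))
            .setColumnFamily(info)
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
      }
    }
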
2024-11-22T13:36:29,857 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(2659): reportForDuty to master=e025332d312f,35061,1732282589011 with port=43097, startcode=1732282589184 2024-11-22T13:36:29,857 DEBUG [RS:0;e025332d312f:43097 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T13:36:29,859 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44495, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T13:36:29,859 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35061 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e025332d312f,43097,1732282589184 2024-11-22T13:36:29,859 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35061 {}] master.ServerManager(517): Registering regionserver=e025332d312f,43097,1732282589184 2024-11-22T13:36:29,861 DEBUG [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7 2024-11-22T13:36:29,861 DEBUG [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33787 2024-11-22T13:36:29,861 DEBUG [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T13:36:29,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:36:29,870 DEBUG [RS:0;e025332d312f:43097 {}] zookeeper.ZKUtil(111): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e025332d312f,43097,1732282589184 2024-11-22T13:36:29,870 WARN [RS:0;e025332d312f:43097 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T13:36:29,870 INFO [RS:0;e025332d312f:43097 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:36:29,870 DEBUG [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184 2024-11-22T13:36:29,871 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e025332d312f,43097,1732282589184] 2024-11-22T13:36:29,874 INFO [RS:0;e025332d312f:43097 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T13:36:29,875 INFO [RS:0;e025332d312f:43097 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T13:36:29,876 INFO [RS:0;e025332d312f:43097 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T13:36:29,876 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
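At this point the region server has reported for duty and the master has registered e025332d312f,43097,1732282589184 under /hbase/rs. A small client-side sketch that connects through the same ZooKeeper quorum the ZKWatcher lines show (127.0.0.1:61784, base znode /hbase) and lists the live region servers; the class name is invented and the connection settings are assumptions read off those log lines.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class LiveServersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum, client port and base znode taken from the ZKWatcher lines in this log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "61784");
        conf.set("zookeeper.znode.parent", "/hbase");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Each ServerName here corresponds to a registered region server,
          // e.g. e025332d312f,43097,1732282589184 once reportForDuty has succeeded.
          admin.getClusterMetrics().getLiveServerMetrics().keySet()
              .forEach(sn -> System.out.println("live region server: " + sn));
        }
      }
    }
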
2024-11-22T13:36:29,876 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T13:36:29,877 INFO [RS:0;e025332d312f:43097 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T13:36:29,877 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,877 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:36:29,878 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:36:29,878 DEBUG [RS:0;e025332d312f:43097 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:36:29,878 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
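The ExecutorService lines above show each region-server executor being created with a fixed corePoolSize/maxPoolSize (RS_OPEN_REGION 1/1, RS_LOG_REPLAY_OPS 2/2, RS_SNAPSHOT_OPERATIONS 3/3, and so on), the same pattern the master used earlier for its MASTER_* pools and the RemoteProcedureDispatcher (coreThreads=3, allowCoreThreadTimeOut=true). The following is a plain JDK sketch of that pool shape, not HBase's own ExecutorService wrapper.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
      public static void main(String[] args) {
        // corePoolSize == maxPoolSize, as in the RS_* / MASTER_* pools logged above.
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        // Letting core threads time out mirrors allowCoreThreadTimeOut=true on the
        // RemoteProcedureDispatcher, so an idle pool releases its threads.
        openRegionPool.allowCoreThreadTimeOut(true);

        openRegionPool.execute(() -> System.out.println("pretend to open a region"));
        openRegionPool.shutdown();
      }
    }
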
2024-11-22T13:36:29,878 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,878 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,878 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,878 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,878 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,43097,1732282589184-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:36:29,893 INFO [RS:0;e025332d312f:43097 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T13:36:29,893 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,43097,1732282589184-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,893 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,893 INFO [RS:0;e025332d312f:43097 {}] regionserver.Replication(171): e025332d312f,43097,1732282589184 started 2024-11-22T13:36:29,906 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:29,906 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1482): Serving as e025332d312f,43097,1732282589184, RpcServer on e025332d312f/172.17.0.2:43097, sessionid=0x10162c29aed0001 2024-11-22T13:36:29,906 DEBUG [RS:0;e025332d312f:43097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T13:36:29,906 DEBUG [RS:0;e025332d312f:43097 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e025332d312f,43097,1732282589184 2024-11-22T13:36:29,906 DEBUG [RS:0;e025332d312f:43097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,43097,1732282589184' 2024-11-22T13:36:29,906 DEBUG [RS:0;e025332d312f:43097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T13:36:29,907 DEBUG [RS:0;e025332d312f:43097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T13:36:29,907 DEBUG [RS:0;e025332d312f:43097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T13:36:29,907 DEBUG [RS:0;e025332d312f:43097 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T13:36:29,907 DEBUG [RS:0;e025332d312f:43097 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e025332d312f,43097,1732282589184 2024-11-22T13:36:29,907 DEBUG [RS:0;e025332d312f:43097 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,43097,1732282589184' 2024-11-22T13:36:29,907 DEBUG [RS:0;e025332d312f:43097 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T13:36:29,908 DEBUG 
[RS:0;e025332d312f:43097 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T13:36:29,908 DEBUG [RS:0;e025332d312f:43097 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T13:36:29,908 INFO [RS:0;e025332d312f:43097 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T13:36:29,908 INFO [RS:0;e025332d312f:43097 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T13:36:29,920 WARN [e025332d312f:35061 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T13:36:30,011 INFO [RS:0;e025332d312f:43097 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C43097%2C1732282589184, suffix=, logDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184, archiveDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/oldWALs, maxLogs=32 2024-11-22T13:36:30,013 INFO [RS:0;e025332d312f:43097 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C43097%2C1732282589184.1732282590012 2024-11-22T13:36:30,022 INFO [RS:0;e025332d312f:43097 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 2024-11-22T13:36:30,024 DEBUG [RS:0;e025332d312f:43097 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37929:37929),(127.0.0.1/127.0.0.1:43965:43965)] 2024-11-22T13:36:30,170 DEBUG [e025332d312f:35061 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T13:36:30,171 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e025332d312f,43097,1732282589184 2024-11-22T13:36:30,174 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,43097,1732282589184, state=OPENING 2024-11-22T13:36:30,228 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T13:36:30,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:30,239 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:30,240 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:36:30,241 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,43097,1732282589184}] 2024-11-22T13:36:30,241 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-22T13:36:30,241 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:36:30,398 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T13:36:30,403 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33367, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T13:36:30,411 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T13:36:30,411 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:36:30,414 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C43097%2C1732282589184.meta, suffix=.meta, logDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184, archiveDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/oldWALs, maxLogs=32 2024-11-22T13:36:30,415 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta 2024-11-22T13:36:30,422 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta 2024-11-22T13:36:30,428 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43965:43965),(127.0.0.1/127.0.0.1:37929:37929)] 2024-11-22T13:36:30,432 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:36:30,433 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T13:36:30,433 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T13:36:30,433 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
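The assignment above ends with MetaTableLocator writing the OPENING state for hbase:meta into ZooKeeper and MetaRegionLocationCache reacting to the CHANGED notification on /hbase/meta-region-server. Below is a bare ZooKeeper-client sketch that watches the same znode and prints the events it receives; the class name is made up, the quorum and znode path are copied from the log, and since the znode payload is a serialized protobuf (MetaRegionServer) the sketch only reports its size rather than decoding it.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MetaZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum and znode path taken from the ZKWatcher / MetaRegionLocationCache lines above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61784", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          System.out.println("event: " + event.getType() + " on " + event.getPath());
        });
        connected.await();
        // exists() registers the default watcher, so NodeCreated / NodeDataChanged on
        // /hbase/meta-region-server are delivered to the lambda above.
        Stat stat = zk.exists("/hbase/meta-region-server", true);
        if (stat != null) {
          byte[] data = zk.getData("/hbase/meta-region-server", true, stat);
          // The payload is a serialized MetaRegionServer protobuf; just report its size.
          System.out.println("meta location znode has " + data.length + " bytes");
        }
        Thread.sleep(10000); // keep the session alive long enough to observe a few events
        zk.close();
      }
    }
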
2024-11-22T13:36:30,433 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T13:36:30,433 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:36:30,433 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T13:36:30,433 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T13:36:30,434 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:36:30,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:36:30,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:30,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:36:30,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:36:30,437 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:36:30,437 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:30,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:36:30,437 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:36:30,438 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:36:30,438 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:30,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:36:30,439 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:36:30,439 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:36:30,439 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:30,440 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
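Each CompactionConfiguration line above dumps the per-family compaction knobs the store opener resolved for 1588230740: minCompactSize 128 MB, files [min 3, max 10), ratio 1.2 with off-peak ratio 5.0, and the exploring compaction policy with exponential windows. The sketch below shows how those same knobs are commonly set on the cluster Configuration; the class name is invented, the values simply restate what the log reports, and only widely used hbase.hstore.compaction.* keys are included.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Matches "files [minFilesToCompact:3, maxFilesToCompact:10)" in the log above.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Matches "ratio 1.200000; off-peak ratio 5.000000".
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // Matches "minCompactSize:128 MB" (value in bytes).
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        return conf;
      }
    }
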
2024-11-22T13:36:30,440 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:36:30,440 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740 2024-11-22T13:36:30,442 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740 2024-11-22T13:36:30,443 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:36:30,443 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:36:30,443 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:36:30,445 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:36:30,446 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703175, jitterRate=-0.10586783289909363}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:36:30,446 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T13:36:30,446 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732282590433Writing region info on filesystem at 1732282590433Initializing all the Stores at 1732282590434 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282590434Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282590434Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282590434Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282590434Cleaning up temporary data from old regions at 1732282590443 (+9 ms)Running coprocessor post-open hooks at 1732282590446 (+3 ms)Region opened successfully at 1732282590446 2024-11-22T13:36:30,447 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732282590397 2024-11-22T13:36:30,450 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T13:36:30,450 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T13:36:30,451 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,43097,1732282589184 2024-11-22T13:36:30,452 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,43097,1732282589184, state=OPEN 2024-11-22T13:36:30,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:36:30,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:36:30,491 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e025332d312f,43097,1732282589184 2024-11-22T13:36:30,491 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:36:30,491 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:36:30,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T13:36:30,497 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,43097,1732282589184 in 250 msec 2024-11-22T13:36:30,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T13:36:30,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 729 msec 2024-11-22T13:36:30,501 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:36:30,502 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T13:36:30,503 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:36:30,503 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,43097,1732282589184, seqNum=-1] 2024-11-22T13:36:30,503 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:36:30,505 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36655, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:36:30,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 788 msec 2024-11-22T13:36:30,511 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732282590511, completionTime=-1 2024-11-22T13:36:30,511 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T13:36:30,512 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T13:36:30,513 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T13:36:30,513 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732282650513 2024-11-22T13:36:30,513 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732282710513 2024-11-22T13:36:30,513 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-22T13:36:30,514 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35061,1732282589011-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:30,514 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35061,1732282589011-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:30,514 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35061,1732282589011-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:30,514 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e025332d312f:35061, period=300000, unit=MILLISECONDS is enabled. 
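Editor's note: InitMetaProcedure(114) above creates the built-in 'default' and 'hbase' namespaces before the master reports initialization complete. A client can confirm they exist through the public Admin API; the sketch below is a hypothetical stand-alone check, not code taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespacesSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at the running cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // After InitMetaProcedure completes, this should print at least "default" and "hbase".
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
    }
  }
}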
2024-11-22T13:36:30,514 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:30,514 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T13:36:30,516 DEBUG [master/e025332d312f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T13:36:30,518 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.258sec 2024-11-22T13:36:30,518 INFO [master/e025332d312f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T13:36:30,518 INFO [master/e025332d312f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T13:36:30,518 INFO [master/e025332d312f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T13:36:30,518 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T13:36:30,518 INFO [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T13:36:30,518 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35061,1732282589011-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:36:30,518 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35061,1732282589011-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T13:36:30,521 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T13:36:30,521 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T13:36:30,521 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35061,1732282589011-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
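Editor's note: the ChoreService(168) lines above show the master registering its periodic chores (balancer, catalog janitor, HbckChore, MOB cleaners) with their periods. Internally these run on a scheduled thread pool; the sketch below illustrates the same fixed-period pattern with plain java.util.concurrent rather than HBase's own ChoreService class, which is an internal API.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Illustrative only: a fixed-period task, analogous in shape to a master chore
    // such as BalancerChore (period=300000 ms in the log above).
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    pool.scheduleAtFixedRate(
        () -> System.out.println("chore tick at " + System.currentTimeMillis()),
        0, 1, TimeUnit.SECONDS);
    TimeUnit.SECONDS.sleep(3); // let it tick a few times
    pool.shutdownNow();
  }
}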
2024-11-22T13:36:30,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117b424d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:36:30,612 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e025332d312f,35061,-1 for getting cluster id 2024-11-22T13:36:30,613 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T13:36:30,616 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '56ecc500-bf92-4749-9913-cfe3b92b4079' 2024-11-22T13:36:30,616 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T13:36:30,616 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "56ecc500-bf92-4749-9913-cfe3b92b4079" 2024-11-22T13:36:30,617 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1af89d9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:36:30,617 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e025332d312f,35061,-1] 2024-11-22T13:36:30,617 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T13:36:30,618 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:30,620 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35594, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T13:36:30,621 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e70018, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:36:30,622 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:36:30,623 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,43097,1732282589184, seqNum=-1] 2024-11-22T13:36:30,624 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:36:30,627 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49094, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:36:30,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e025332d312f,35061,1732282589011 2024-11-22T13:36:30,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using 
class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:36:30,632 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T13:36:30,632 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-22T13:36:30,633 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-22T13:36:30,633 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T13:36:30,634 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is e025332d312f,35061,1732282589011 2024-11-22T13:36:30,634 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2a2efa67 2024-11-22T13:36:30,634 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T13:36:30,637 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35608, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T13:36:30,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35061 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T13:36:30,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35061 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
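Editor's note: the two TableDescriptorChecker(321) warnings above fire because the test cluster runs with deliberately tiny region settings ("hbase.hregion.max.filesize" = 786432 bytes, i.e. 768 KB, and "hbase.hregion.memstore.flush.size" = 8192 bytes) so that flushes, rolls and splits happen quickly. The snippet below is a hypothetical reconstruction of how such values would be set on a test Configuration; where this particular test sets them is not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionSettingsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Values this small trip TableDescriptorChecker's sanity warnings, which is
    // expected and harmless in a unit test that wants frequent flushes and rolls.
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // 768 KB
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB
    System.out.println("max.filesize=" + conf.getLong("hbase.hregion.max.filesize", -1L));
    System.out.println("memstore.flush.size=" + conf.getLong("hbase.hregion.memstore.flush.size", -1L));
  }
}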
2024-11-22T13:36:30,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35061 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:36:30,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35061 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T13:36:30,641 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T13:36:30,641 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:30,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35061 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-22T13:36:30,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T13:36:30,643 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T13:36:30,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741835_1011 (size=395) 2024-11-22T13:36:30,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741835_1011 (size=395) 2024-11-22T13:36:30,653 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 45814694ef682d42714d8c280c049576, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7 2024-11-22T13:36:30,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34223 is added to blk_1073741836_1012 (size=78) 2024-11-22T13:36:30,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41283 is added to blk_1073741836_1012 (size=78) 2024-11-22T13:36:30,660 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:36:30,660 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 45814694ef682d42714d8c280c049576, disabling compactions & flushes 2024-11-22T13:36:30,660 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:30,660 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:30,660 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. after waiting 0 ms 2024-11-22T13:36:30,660 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:30,660 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:30,660 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 45814694ef682d42714d8c280c049576: Waiting for close lock at 1732282590660Disabling compacts and flushes for region at 1732282590660Disabling writes for close at 1732282590660Writing region close event to WAL at 1732282590660Closed at 1732282590660 2024-11-22T13:36:30,662 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T13:36:30,662 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732282590662"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732282590662"}]},"ts":"1732282590662"} 2024-11-22T13:36:30,664 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
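Editor's note: the CreateTableProcedure entries above and below correspond to a client-side createTable request for 'TestLogRolling-testLogRollOnPipelineRestart' with a single 'info' family (VERSIONS => '1', BLOOMFILTER => 'ROW'). The sketch below shows what such a request looks like through the public Admin API; it is assembled from the descriptor echoed in the log, not copied from the test's source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart");
      // Single 'info' family keeping at most one version, matching the descriptor in the log.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
              .setMaxVersions(1)
              .build())
          .build();
      if (!admin.tableExists(name)) {
        admin.createTable(desc); // a request like this drives the pid=4 CreateTableProcedure seen here
      }
    }
  }
}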
2024-11-22T13:36:30,665 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T13:36:30,665 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282590665"}]},"ts":"1732282590665"} 2024-11-22T13:36:30,667 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-22T13:36:30,668 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=45814694ef682d42714d8c280c049576, ASSIGN}] 2024-11-22T13:36:30,669 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=45814694ef682d42714d8c280c049576, ASSIGN 2024-11-22T13:36:30,670 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=45814694ef682d42714d8c280c049576, ASSIGN; state=OFFLINE, location=e025332d312f,43097,1732282589184; forceNewPlan=false, retain=false 2024-11-22T13:36:30,729 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:30,738 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:36:30,821 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=45814694ef682d42714d8c280c049576, regionState=OPENING, regionLocation=e025332d312f,43097,1732282589184 2024-11-22T13:36:30,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=45814694ef682d42714d8c280c049576, ASSIGN because future has completed 2024-11-22T13:36:30,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 45814694ef682d42714d8c280c049576, server=e025332d312f,43097,1732282589184}] 2024-11-22T13:36:30,992 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:30,993 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 45814694ef682d42714d8c280c049576, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:36:30,994 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 45814694ef682d42714d8c280c049576 2024-11-22T13:36:30,994 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:36:30,994 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 45814694ef682d42714d8c280c049576 2024-11-22T13:36:30,994 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 45814694ef682d42714d8c280c049576 2024-11-22T13:36:30,997 INFO [StoreOpener-45814694ef682d42714d8c280c049576-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 45814694ef682d42714d8c280c049576 2024-11-22T13:36:30,999 INFO [StoreOpener-45814694ef682d42714d8c280c049576-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45814694ef682d42714d8c280c049576 columnFamilyName info 2024-11-22T13:36:30,999 DEBUG [StoreOpener-45814694ef682d42714d8c280c049576-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:36:31,000 INFO [StoreOpener-45814694ef682d42714d8c280c049576-1 {}] regionserver.HStore(327): Store=45814694ef682d42714d8c280c049576/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:36:31,000 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 45814694ef682d42714d8c280c049576 2024-11-22T13:36:31,001 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576 2024-11-22T13:36:31,001 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576 2024-11-22T13:36:31,001 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 45814694ef682d42714d8c280c049576 2024-11-22T13:36:31,002 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 45814694ef682d42714d8c280c049576 2024-11-22T13:36:31,003 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 45814694ef682d42714d8c280c049576 2024-11-22T13:36:31,006 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:36:31,006 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 45814694ef682d42714d8c280c049576; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=791696, jitterRate=0.00669454038143158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T13:36:31,006 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 45814694ef682d42714d8c280c049576 2024-11-22T13:36:31,007 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 45814694ef682d42714d8c280c049576: Running coprocessor pre-open hook at 1732282590995Writing region info on filesystem at 1732282590995Initializing all the Stores at 1732282590997 (+2 
ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282590997Cleaning up temporary data from old regions at 1732282591002 (+5 ms)Running coprocessor post-open hooks at 1732282591006 (+4 ms)Region opened successfully at 1732282591007 (+1 ms) 2024-11-22T13:36:31,008 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576., pid=6, masterSystemTime=1732282590983 2024-11-22T13:36:31,011 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:31,011 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:31,012 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=45814694ef682d42714d8c280c049576, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,43097,1732282589184 2024-11-22T13:36:31,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 45814694ef682d42714d8c280c049576, server=e025332d312f,43097,1732282589184 because future has completed 2024-11-22T13:36:31,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T13:36:31,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 45814694ef682d42714d8c280c049576, server=e025332d312f,43097,1732282589184 in 187 msec 2024-11-22T13:36:31,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T13:36:31,021 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=45814694ef682d42714d8c280c049576, ASSIGN in 350 msec 2024-11-22T13:36:31,022 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T13:36:31,022 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282591022"}]},"ts":"1732282591022"} 2024-11-22T13:36:31,024 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-22T13:36:31,025 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; 
CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T13:36:31,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 387 msec 2024-11-22T13:36:31,730 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:31,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:32,404 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T13:36:32,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,429 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,430 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:36:32,731 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:32,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:33,732 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:33,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:34,733 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:34,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:35,735 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:35,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:35,875 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T13:36:35,877 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-22T13:36:36,736 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:36,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:36:36,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T13:36:36,898 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T13:36:36,901 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T13:36:36,901 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-22T13:36:36,902 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:36:36,902 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T13:36:36,903 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T13:36:36,903 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T13:36:37,737 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:37,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:38,739 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:38,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:39,740 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:39,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T13:36:40,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35061 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-22T13:36:40,700 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-22T13:36:40,700 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-22T13:36:40,705 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-22T13:36:40,705 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576.
2024-11-22T13:36:40,711 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576., hostname=e025332d312f,43097,1732282589184, seqNum=2]
2024-11-22T13:36:40,741 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:40,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:41,742 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:41,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T13:36:42,714 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012
2024-11-22T13:36:42,716 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1010
java.io.EOFException: Unexpected EOF while trying to read response from server
    at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:42,717 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006
java.io.IOException: Bad response ERROR for BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:34223,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]
    at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-22T13:36:42,717 WARN [DataStreamer for file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 block BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK], DatanodeInfoWithStorage[127.0.0.1:34223,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34223,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]) is bad. 2024-11-22T13:36:42,718 WARN [DataStreamer for file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta block BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34223,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK], DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34223,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]) is bad. 2024-11-22T13:36:42,718 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:34223,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:42,719 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:38738 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38738 dst: /127.0.0.1:34223 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,719 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:34448 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34448 dst: /127.0.0.1:41283 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,719 WARN [DataStreamer for file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 block BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK], DatanodeInfoWithStorage[127.0.0.1:34223,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34223,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]) is bad. 
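The "Error Recovery for ... in pipeline [...]: datanode N(...) is bad" warnings above show the HDFS writer dropping the failed datanode from the two-node write pipeline and continuing on the surviving replica. Whether the client then tries to find a replacement datanode is governed by the client-side dfs.client.block.write.replace-datanode-on-failure.* settings. The snippet below is a minimal, illustrative sketch of setting that policy before opening a FileSystem; the NameNode URI and the chosen values are placeholders, not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;

public class PipelineFailurePolicyExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // With only two datanodes in the pipeline (as in this mini-cluster), losing one
        // leaves a single replica; these client settings control whether the writer asks
        // the NameNode for a replacement datanode or keeps writing to the survivor.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

        // Placeholder NameNode URI, for illustration only.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
            fs.getFileStatus(new Path("/"));
        }
    }
}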
2024-11-22T13:36:42,719 WARN [PacketResponder: BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34223] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,720 WARN [PacketResponder: BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34223] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:34438 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34438 dst: /127.0.0.1:41283 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1687910554_22 at /127.0.0.1:34418 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34418 dst: /127.0.0.1:41283 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1687910554_22 at /127.0.0.1:38710 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38710 dst: /127.0.0.1:34223 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:42,721 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:38728 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38728 dst: /127.0.0.1:34223 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,743 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:42,751 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:42,761 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62169090{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:42,762 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b2d1260{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:36:42,762 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:36:42,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@392000f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:36:42,762 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c07ac8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,STOPPED} 2024-11-22T13:36:42,765 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:36:42,765 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:36:42,765 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891882766-172.17.0.2-1732282586777 (Datanode Uuid c0dd6e3b-3203-46b9-b858-186208f131df) service to localhost/127.0.0.1:33787 2024-11-22T13:36:42,765 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:36:42,766 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data3/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:42,767 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data4/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:42,767 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:36:42,777 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:42,781 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:36:42,782 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:36:42,782 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:36:42,782 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:36:42,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4eef3a93{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:36:42,783 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66943013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:36:42,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35936f2e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/java.io.tmpdir/jetty-localhost-40117-hadoop-hdfs-3_4_1-tests_jar-_-any-15870486956942438054/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:42,877 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4022a798{HTTP/1.1, 
(http/1.1)}{localhost:40117} 2024-11-22T13:36:42,877 INFO [Time-limited test {}] server.Server(415): Started @172056ms 2024-11-22T13:36:42,878 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:36:42,897 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:42,897 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:42,897 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:42,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:34218 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34218 dst: /127.0.0.1:41283 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:34220 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34220 dst: /127.0.0.1:41283 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:42,898 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1687910554_22 at /127.0.0.1:34202 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41283:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34202 dst: /127.0.0.1:41283 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T13:36:42,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60deb4a2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:42,901 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a75563d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:36:42,901 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:36:42,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60abc71f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:36:42,901 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33cf8bc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,STOPPED} 2024-11-22T13:36:42,903 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:36:42,903 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T13:36:42,903 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891882766-172.17.0.2-1732282586777 (Datanode Uuid b4ffa5e1-70a8-4e25-bc37-c92e218af6d5) service to localhost/127.0.0.1:33787 2024-11-22T13:36:42,903 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:36:42,903 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data1/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:42,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data2/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:42,904 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:36:42,912 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:42,915 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:36:42,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:36:42,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:36:42,916 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:36:42,916 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@168cc4fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:36:42,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@474594d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:36:43,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7790ff99{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/java.io.tmpdir/jetty-localhost-44039-hadoop-hdfs-3_4_1-tests_jar-_-any-13420902746414085568/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:43,009 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29b43d59{HTTP/1.1, (http/1.1)}{localhost:44039} 2024-11-22T13:36:43,009 INFO [Time-limited test {}] server.Server(415): Started @172188ms 2024-11-22T13:36:43,010 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:36:43,544 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:36:43,547 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x238eb846d1826124 with lease ID 0x3caa58f0daae69af: from storage DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5 node DatanodeRegistration(127.0.0.1:45167, datanodeUuid=c0dd6e3b-3203-46b9-b858-186208f131df, infoPort=36811, infoSecurePort=0, ipcPort=37179, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:43,547 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x238eb846d1826124 with lease ID 0x3caa58f0daae69af: from storage DS-b61a50e8-e83a-4278-a790-37468070bd81 node DatanodeRegistration(127.0.0.1:45167, datanodeUuid=c0dd6e3b-3203-46b9-b858-186208f131df, infoPort=36811, infoSecurePort=0, ipcPort=37179, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:43,635 WARN [Thread-1360 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T13:36:43,637 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4cd82e5c9e739f1 with lease ID 0x3caa58f0daae69b0: from storage DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b node DatanodeRegistration(127.0.0.1:36645, datanodeUuid=b4ffa5e1-70a8-4e25-bc37-c92e218af6d5, infoPort=38817, infoSecurePort=0, ipcPort=46411, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:43,637 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4cd82e5c9e739f1 with lease ID 0x3caa58f0daae69b0: from storage DS-4f790a47-b865-45bc-aa1c-bee1ec8fc60f node DatanodeRegistration(127.0.0.1:36645, datanodeUuid=b4ffa5e1-70a8-4e25-bc37-c92e218af6d5, infoPort=38817, infoSecurePort=0, ipcPort=46411, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:43,744 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:43,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:36:44,027 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-22T13:36:44,031 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-22T13:36:44,033 ERROR [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7-prefix:e025332d312f,43097,1732282589184 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:44,033 WARN [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7-prefix:e025332d312f,43097,1732282589184 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:44,033 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C43097%2C1732282589184:(num 1732282590012) roll requested 2024-11-22T13:36:44,034 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C43097%2C1732282589184.1732282604034 2024-11-22T13:36:44,042 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 newFile=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 2024-11-22T13:36:44,043 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:44,043 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:44,043 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:44,043 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:44,043 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:44,044 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 2024-11-22T13:36:44,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:44,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:44,044 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 2024-11-22T13:36:44,045 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36811:36811),(127.0.0.1/127.0.0.1:38817:38817)] 2024-11-22T13:36:44,045 WARN [IPC Server handler 4 on default port 33787 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1015 2024-11-22T13:36:44,045 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 is not closed yet, will try archiving it next time 2024-11-22T13:36:44,045 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 after 1ms 2024-11-22T13:36:44,745 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:44,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:45,547 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1015: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T13:36:45,746 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:45,754 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:46,050 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-22T13:36:46,747 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:46,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:47,748 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:47,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:48,047 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 after 4003ms 2024-11-22T13:36:48,057 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:36645,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:48,058 WARN [DataStreamer for file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 block BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45167,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK], DatanodeInfoWithStorage[127.0.0.1:36645,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36645,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]) is bad. 2024-11-22T13:36:48,058 WARN [PacketResponder: BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36645] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:48,059 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:47672 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45167:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47672 dst: /127.0.0.1:45167 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:48,059 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:44382 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:36645:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44382 dst: /127.0.0.1:36645 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:48,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7790ff99{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:48,118 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29b43d59{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:36:48,118 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:36:48,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@474594d6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:36:48,119 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@168cc4fb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,STOPPED} 2024-11-22T13:36:48,122 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:36:48,122 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:36:48,122 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:36:48,122 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891882766-172.17.0.2-1732282586777 (Datanode Uuid b4ffa5e1-70a8-4e25-bc37-c92e218af6d5) service to localhost/127.0.0.1:33787 2024-11-22T13:36:48,123 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data1/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:48,124 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data2/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:48,124 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:36:48,134 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:48,137 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:36:48,138 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:36:48,138 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:36:48,138 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:36:48,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10b7c71{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:36:48,139 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6023e2fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:36:48,233 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@160f7ba9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/java.io.tmpdir/jetty-localhost-34655-hadoop-hdfs-3_4_1-tests_jar-_-any-8207132268661456848/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:48,233 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3df3f65e{HTTP/1.1, (http/1.1)}{localhost:34655} 2024-11-22T13:36:48,234 INFO [Time-limited test {}] server.Server(415): Started @177413ms 2024-11-22T13:36:48,235 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:36:48,255 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:48,256 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1319544701_22 at /127.0.0.1:47684 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:45167:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47684 dst: /127.0.0.1:45167 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:48,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35936f2e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:48,261 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4022a798{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:36:48,261 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:36:48,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66943013{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:36:48,261 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4eef3a93{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,STOPPED} 2024-11-22T13:36:48,262 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:36:48,262 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:36:48,262 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891882766-172.17.0.2-1732282586777 (Datanode Uuid c0dd6e3b-3203-46b9-b858-186208f131df) service to localhost/127.0.0.1:33787 2024-11-22T13:36:48,262 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:36:48,263 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data4/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:48,263 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data3/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:36:48,263 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:36:48,275 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:36:48,279 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:36:48,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:36:48,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:36:48,280 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:36:48,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a260e23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:36:48,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5d4a7998{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:36:48,387 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c7562d5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/java.io.tmpdir/jetty-localhost-36917-hadoop-hdfs-3_4_1-tests_jar-_-any-15684354597733672575/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:36:48,388 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3de0aa3a{HTTP/1.1, 
(http/1.1)}{localhost:36917} 2024-11-22T13:36:48,388 INFO [Time-limited test {}] server.Server(415): Started @177567ms 2024-11-22T13:36:48,389 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:36:48,749 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:48,757 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:48,902 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T13:36:48,905 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23a034a485634bd1 with lease ID 0x3caa58f0daae69b1: from storage DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b node DatanodeRegistration(127.0.0.1:34637, datanodeUuid=b4ffa5e1-70a8-4e25-bc37-c92e218af6d5, infoPort=42243, infoSecurePort=0, ipcPort=35081, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:48,905 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x23a034a485634bd1 with lease ID 0x3caa58f0daae69b1: from storage DS-4f790a47-b865-45bc-aa1c-bee1ec8fc60f node DatanodeRegistration(127.0.0.1:34637, datanodeUuid=b4ffa5e1-70a8-4e25-bc37-c92e218af6d5, infoPort=42243, infoSecurePort=0, ipcPort=35081, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T13:36:48,993 WARN [Thread-1434 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:36:48,996 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2c6b4e3d542c353 with lease ID 0x3caa58f0daae69b2: from storage DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5 node DatanodeRegistration(127.0.0.1:38077, datanodeUuid=c0dd6e3b-3203-46b9-b858-186208f131df, infoPort=35143, infoSecurePort=0, ipcPort=37303, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:48,996 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf2c6b4e3d542c353 with lease ID 0x3caa58f0daae69b2: from storage DS-b61a50e8-e83a-4278-a790-37468070bd81 node DatanodeRegistration(127.0.0.1:38077, datanodeUuid=c0dd6e3b-3203-46b9-b858-186208f131df, infoPort=35143, infoSecurePort=0, ipcPort=37303, storageInfo=lv=-57;cid=testClusterID;nsid=341789155;c=1732282586777), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:36:49,406 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-22T13:36:49,411 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-22T13:36:49,414 ERROR [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7-prefix:e025332d312f,43097,1732282589184 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45167,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:49,414 WARN [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7-prefix:e025332d312f,43097,1732282589184 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45167,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:49,415 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C43097%2C1732282589184:(num 1732282604034) roll requested 2024-11-22T13:36:49,415 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C43097%2C1732282589184.1732282609415 2024-11-22T13:36:49,422 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 newFile=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282609415 2024-11-22T13:36:49,422 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:49,422 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:49,422 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:49,423 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:49,423 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:49,423 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282609415 2024-11-22T13:36:49,423 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45167,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:49,423 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45167,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:49,423 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 2024-11-22T13:36:49,424 WARN [IPC Server handler 0 on default port 33787 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-22T13:36:49,424 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35143:35143),(127.0.0.1/127.0.0.1:42243:42243)] 2024-11-22T13:36:49,424 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 is not closed yet, will try archiving it next time 2024-11-22T13:36:49,424 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 after 1ms 2024-11-22T13:36:49,750 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:49,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:50,752 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:50,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:51,426 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:51,437 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282609415 newFile=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:51,438 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:51,438 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:51,438 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:51,438 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:51,439 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:51,439 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282609415 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:51,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741838_1019 (size=1264) 2024-11-22T13:36:51,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741838_1019 (size=1264) 2024-11-22T13:36:51,442 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 is not closed yet, will try archiving it next time 2024-11-22T13:36:51,444 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35143:35143),(127.0.0.1/127.0.0.1:42243:42243)] 2024-11-22T13:36:51,444 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 is not closed yet, will try archiving it next time 2024-11-22T13:36:51,445 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 2024-11-22T13:36:51,445 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 2024-11-22T13:36:51,445 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 after 0ms 2024-11-22T13:36:51,445 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 2024-11-22T13:36:51,454 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732282591007/Put/vlen=218/seqid=0] 2024-11-22T13:36:51,454 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732282600712/Put/vlen=1045/seqid=0] 2024-11-22T13:36:51,454 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282590012 2024-11-22T13:36:51,454 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 2024-11-22T13:36:51,454 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 2024-11-22T13:36:51,455 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 after 1ms 2024-11-22T13:36:51,455 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 2024-11-22T13:36:51,458 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732282604032/Put/vlen=1045/seqid=0] 2024-11-22T13:36:51,458 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732282606052/Put/vlen=1045/seqid=0] 2024-11-22T13:36:51,459 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 2024-11-22T13:36:51,459 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282609415 2024-11-22T13:36:51,459 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282609415 2024-11-22T13:36:51,459 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282609415 after 0ms 2024-11-22T13:36:51,459 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282609415 2024-11-22T13:36:51,462 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732282609413/Put/vlen=1045/seqid=0] 2024-11-22T13:36:51,462 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:51,462 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:51,463 WARN [IPC Server handler 1 on default port 33787 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-22T13:36:51,463 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 after 1ms 2024-11-22T13:36:51,753 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:51,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:51,999 WARN [ResponseProcessor for block BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:51,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1687910554_22 at /127.0.0.1:57388 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:38077:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57388 dst: /127.0.0.1:38077 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:38077 remote=/127.0.0.1:57388]. Total timeout mills is 60000, 59438 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:51,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1687910554_22 at /127.0.0.1:36132 [Receiving block BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34637:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36132 dst: /127.0.0.1:34637 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T13:36:52,000 WARN [DataStreamer for file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 block BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38077,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK], DatanodeInfoWithStorage[127.0.0.1:34637,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38077,DS-c9e324eb-9c9f-472d-9d8d-f8268ea2e2d5,DISK]) is bad. 2024-11-22T13:36:52,003 WARN [DataStreamer for file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 block BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:52,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741839_1022 (size=85) 2024-11-22T13:36:52,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741839_1022 (size=85) 2024-11-22T13:36:52,755 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:52,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:53,426 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282604034 after 4003ms 2024-11-22T13:36:53,756 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:53,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:54,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:54,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:54,907 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
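The repeated RecoverLeaseFSUtils warnings above ("Failed invocation ... Filesystem closed", then "Recovered lease, attempt=1 ... after 4003ms") come from a retry loop that asks the NameNode to recover the WAL file's lease and then polls until the file is reported closed. A minimal, hypothetical sketch of that recover-then-poll pattern against a DistributedFileSystem is shown below; the class name, path, and timeout values are illustrative and not taken from this test run.

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch (hypothetical helper): the recover-then-poll pattern
// that RecoverLeaseFSUtils roughly implements in the log above. The sleep
// interval and timeout are placeholders, not values from the test run.
public final class LeaseRecoverySketch {
  public static boolean recoverLease(DistributedFileSystem dfs, Path wal,
      long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // Ask the NameNode to start (or re-check) lease recovery on the file.
      boolean recovered = dfs.recoverLease(wal);
      // isFileClosed() is the same check the stack traces above were making
      // when the client-side filesystem had already been closed.
      if (recovered || dfs.isFileClosed(wal)) {
        return true;                 // lease released, file is closed
      }
      Thread.sleep(1000L);           // back off before the next attempt
    }
    return false;                    // caller decides how to handle a timeout
  }
}
```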
2024-11-22T13:36:55,465 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 after 4003ms 2024-11-22T13:36:55,465 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:55,475 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:55,475 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 45814694ef682d42714d8c280c049576 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-22T13:36:55,476 ERROR [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7-prefix:e025332d312f,43097,1732282589184 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:55,476 WARN [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7-prefix:e025332d312f,43097,1732282589184 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:55,476 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C43097%2C1732282589184:(num 1732282611425) roll requested 2024-11-22T13:36:55,477 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C43097%2C1732282589184.1732282615477 2024-11-22T13:36:55,483 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 newFile=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282615477 2024-11-22T13:36:55,483 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,483 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,483 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,483 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,483 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,483 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282615477 2024-11-22T13:36:55,484 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:55,484 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1891882766-172.17.0.2-1732282586777:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
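The "roll requested" / "Rolled WAL" sequence that follows is driven internally by the region server's logRoller, but a roll of a specific server's WAL can also be requested from outside through the Admin API. A small hypothetical snippet (connection setup is a placeholder; the server name is copied from the log purely for illustration) might look like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical snippet: ask one region server to roll its WAL, which is the
// same kind of roll the logRoller performs in the log below.
public final class RollWalSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // host,port,startcode taken from the log entries above, used here
      // only as an example value.
      ServerName rs = ServerName.valueOf("e025332d312f,43097,1732282589184");
      admin.rollWALWriter(rs);   // request a WAL roll on that region server
    }
  }
}
```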
2024-11-22T13:36:55,484 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:55,485 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42243:42243),(127.0.0.1/127.0.0.1:35143:35143)] 2024-11-22T13:36:55,485 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 is not closed yet, will try archiving it next time 2024-11-22T13:36:55,485 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 after 1ms 2024-11-22T13:36:55,486 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.1732282611425 to hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/oldWALs/e025332d312f%2C43097%2C1732282589184.1732282611425 2024-11-22T13:36:55,501 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576/.tmp/info/07b2e96e8abe4b2b94ed037eed516c5e is 1080, key is row1002/info:/1732282600712/Put/seqid=0 2024-11-22T13:36:55,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741841_1024 (size=9270) 2024-11-22T13:36:55,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741841_1024 (size=9270) 2024-11-22T13:36:55,506 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576/.tmp/info/07b2e96e8abe4b2b94ed037eed516c5e 2024-11-22T13:36:55,512 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576/.tmp/info/07b2e96e8abe4b2b94ed037eed516c5e as hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576/info/07b2e96e8abe4b2b94ed037eed516c5e 2024-11-22T13:36:55,518 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576/info/07b2e96e8abe4b2b94ed037eed516c5e, entries=4, sequenceid=8, filesize=9.1 K 2024-11-22T13:36:55,519 INFO [Time-limited test 
{}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 45814694ef682d42714d8c280c049576 in 44ms, sequenceid=8, compaction requested=false 2024-11-22T13:36:55,519 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 45814694ef682d42714d8c280c049576: 2024-11-22T13:36:55,519 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-22T13:36:55,519 ERROR [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7-prefix:e025332d312f,43097,1732282589184.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:55,520 WARN [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7-prefix:e025332d312f,43097,1732282589184.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:55,520 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C43097%2C1732282589184.meta:.meta(num 1732282590415) roll requested 2024-11-22T13:36:55,520 INFO [regionserver/e025332d312f:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C43097%2C1732282589184.meta.1732282615520.meta 2024-11-22T13:36:55,525 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,525 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,525 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,525 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,525 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282615520.meta 2024-11-22T13:36:55,525 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:55,526 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T13:36:55,526 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta 2024-11-22T13:36:55,526 WARN [IPC Server handler 2 on default port 33787 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1014 2024-11-22T13:36:55,526 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta after 0ms 2024-11-22T13:36:55,528 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35143:35143),(127.0.0.1/127.0.0.1:42243:42243)] 2024-11-22T13:36:55,528 DEBUG [regionserver/e025332d312f:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta is not closed yet, will try archiving it next time 2024-11-22T13:36:55,542 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/info/0b33668680104881b78e0da5b489a510 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576./info:regioninfo/1732282591012/Put/seqid=0 2024-11-22T13:36:55,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741843_1027 (size=7125) 2024-11-22T13:36:55,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741843_1027 (size=7125) 2024-11-22T13:36:55,547 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/info/0b33668680104881b78e0da5b489a510 2024-11-22T13:36:55,567 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/ns/94acc71200b842eb8a391eb1d8097def is 43, key is default/ns:d/1732282590505/Put/seqid=0 2024-11-22T13:36:55,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741844_1028 (size=5153) 2024-11-22T13:36:55,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741844_1028 (size=5153) 2024-11-22T13:36:55,572 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/ns/94acc71200b842eb8a391eb1d8097def 2024-11-22T13:36:55,591 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/table/a2cacffc1ace4921a895b7a5cab5901a is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732282591022/Put/seqid=0 2024-11-22T13:36:55,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741845_1029 (size=5438) 2024-11-22T13:36:55,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741845_1029 (size=5438) 2024-11-22T13:36:55,596 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/table/a2cacffc1ace4921a895b7a5cab5901a 2024-11-22T13:36:55,601 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/info/0b33668680104881b78e0da5b489a510 as hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/info/0b33668680104881b78e0da5b489a510 2024-11-22T13:36:55,607 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/info/0b33668680104881b78e0da5b489a510, entries=10, sequenceid=11, filesize=7.0 K 2024-11-22T13:36:55,608 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/ns/94acc71200b842eb8a391eb1d8097def as hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/ns/94acc71200b842eb8a391eb1d8097def 2024-11-22T13:36:55,615 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/ns/94acc71200b842eb8a391eb1d8097def, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T13:36:55,616 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/.tmp/table/a2cacffc1ace4921a895b7a5cab5901a as hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/table/a2cacffc1ace4921a895b7a5cab5901a 2024-11-22T13:36:55,621 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/table/a2cacffc1ace4921a895b7a5cab5901a, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T13:36:55,623 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 104ms, sequenceid=11, compaction requested=false 2024-11-22T13:36:55,623 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T13:36:55,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T13:36:55,628 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T13:36:55,628 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:36:55,628 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:55,628 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:55,628 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T13:36:55,628 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T13:36:55,628 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1746566449, stopped=false 2024-11-22T13:36:55,629 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e025332d312f,35061,1732282589011 2024-11-22T13:36:55,758 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:55,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:55,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:36:55,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:36:55,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:55,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:55,784 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:36:55,785 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T13:36:55,786 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:36:55,786 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:36:55,786 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:36:55,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:55,786 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e025332d312f,43097,1732282589184' ***** 2024-11-22T13:36:55,786 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T13:36:55,787 INFO [RS:0;e025332d312f:43097 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T13:36:55,788 INFO [RS:0;e025332d312f:43097 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T13:36:55,788 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T13:36:55,788 INFO [RS:0;e025332d312f:43097 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T13:36:55,788 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(3091): Received CLOSE for 45814694ef682d42714d8c280c049576 2024-11-22T13:36:55,788 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(959): stopping server e025332d312f,43097,1732282589184 2024-11-22T13:36:55,788 INFO [RS:0;e025332d312f:43097 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:36:55,788 INFO [RS:0;e025332d312f:43097 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e025332d312f:43097. 2024-11-22T13:36:55,788 DEBUG [RS:0;e025332d312f:43097 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:36:55,788 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 45814694ef682d42714d8c280c049576, disabling compactions & flushes 2024-11-22T13:36:55,789 DEBUG [RS:0;e025332d312f:43097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:55,789 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:55,789 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:55,789 INFO [RS:0;e025332d312f:43097 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T13:36:55,789 INFO [RS:0;e025332d312f:43097 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T13:36:55,789 INFO [RS:0;e025332d312f:43097 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T13:36:55,789 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. after waiting 0 ms 2024-11-22T13:36:55,789 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T13:36:55,789 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:55,789 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T13:36:55,789 DEBUG [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1325): Online Regions={45814694ef682d42714d8c280c049576=TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T13:36:55,789 DEBUG [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 45814694ef682d42714d8c280c049576 2024-11-22T13:36:55,790 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:36:55,790 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:36:55,790 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:36:55,790 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:36:55,790 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:36:55,795 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/default/TestLogRolling-testLogRollOnPipelineRestart/45814694ef682d42714d8c280c049576/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-22T13:36:55,795 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T13:36:55,795 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:55,795 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 45814694ef682d42714d8c280c049576: Waiting for close lock at 1732282615788Running coprocessor pre-close hooks at 1732282615788Disabling compacts and flushes for region at 1732282615788Disabling writes for close at 1732282615789 (+1 ms)Writing region close event to WAL at 1732282615790 (+1 ms)Running coprocessor post-close hooks at 1732282615795 (+5 ms)Closed at 1732282615795 2024-11-22T13:36:55,796 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732282590637.45814694ef682d42714d8c280c049576. 2024-11-22T13:36:55,796 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:36:55,796 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:36:55,796 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282615789Running coprocessor pre-close hooks at 1732282615790 (+1 ms)Disabling compacts and flushes for region at 1732282615790Disabling writes for close at 1732282615790Writing region close event to WAL at 1732282615791 (+1 ms)Running coprocessor post-close hooks at 1732282615796 (+5 ms)Closed at 1732282615796 2024-11-22T13:36:55,796 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T13:36:55,880 INFO [regionserver/e025332d312f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:36:55,925 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T13:36:55,926 INFO [regionserver/e025332d312f:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T13:36:55,990 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(976): stopping server e025332d312f,43097,1732282589184; all regions closed. 
2024-11-22T13:36:55,991 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,991 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,992 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,992 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,992 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:55,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741842_1025 (size=825) 2024-11-22T13:36:55,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741842_1025 (size=825) 2024-11-22T13:36:56,759 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:56,766 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:56,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:36:56,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T13:36:56,900 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T13:36:57,761 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:57,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:57,998 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1014: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T13:36:58,762 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:58,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:58,990 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T13:36:59,528 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta after 4002ms 2024-11-22T13:36:59,529 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/WALs/e025332d312f,43097,1732282589184/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta to hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/oldWALs/e025332d312f%2C43097%2C1732282589184.meta.1732282590415.meta 2024-11-22T13:36:59,538 DEBUG [RS:0;e025332d312f:43097 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/oldWALs 2024-11-22T13:36:59,538 INFO [RS:0;e025332d312f:43097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C43097%2C1732282589184.meta:.meta(num 1732282615520) 2024-11-22T13:36:59,539 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,539 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,540 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,540 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,540 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741840_1023 (size=1162) 2024-11-22T13:36:59,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741840_1023 (size=1162) 2024-11-22T13:36:59,547 DEBUG [RS:0;e025332d312f:43097 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/oldWALs 2024-11-22T13:36:59,548 INFO [RS:0;e025332d312f:43097 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C43097%2C1732282589184:(num 1732282615477) 2024-11-22T13:36:59,548 DEBUG [RS:0;e025332d312f:43097 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:36:59,548 INFO [RS:0;e025332d312f:43097 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:36:59,548 INFO [RS:0;e025332d312f:43097 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:36:59,548 INFO [RS:0;e025332d312f:43097 {}] hbase.ChoreService(370): Chore service for: regionserver/e025332d312f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T13:36:59,548 INFO [RS:0;e025332d312f:43097 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:36:59,548 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T13:36:59,548 INFO [RS:0;e025332d312f:43097 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43097 2024-11-22T13:36:59,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:36:59,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e025332d312f,43097,1732282589184 2024-11-22T13:36:59,615 INFO [RS:0;e025332d312f:43097 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:36:59,626 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e025332d312f,43097,1732282589184] 2024-11-22T13:36:59,636 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e025332d312f,43097,1732282589184 already deleted, retry=false 2024-11-22T13:36:59,636 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e025332d312f,43097,1732282589184 expired; onlineServers=0 2024-11-22T13:36:59,636 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e025332d312f,35061,1732282589011' ***** 2024-11-22T13:36:59,636 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T13:36:59,637 INFO [M:0;e025332d312f:35061 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:36:59,637 INFO [M:0;e025332d312f:35061 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:36:59,637 DEBUG [M:0;e025332d312f:35061 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T13:36:59,637 DEBUG [M:0;e025332d312f:35061 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T13:36:59,637 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
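The ZooKeeper events in this stretch show the liveness mechanism at work: each region server holds an ephemeral znode under /hbase/rs, and when its session ends the node disappears, the watcher receives NodeChildrenChanged/NodeDeleted, and the tracker processes the server as expired. A small, hypothetical watcher sketch using the plain ZooKeeper client (znode path and handling are illustrative only, not the HBase tracker code):

import java.util.List;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class RsEphemeralWatchSketch {

  /**
   * Watches the children of an "rs" znode. Each live server registers an
   * ephemeral child, so a NodeChildrenChanged event means the membership
   * changed; re-reading the children reveals which server disappeared.
   */
  static void watchRegionServers(ZooKeeper zk, String rsZnode)
      throws KeeperException, InterruptedException {
    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.NodeChildrenChanged) {
          try {
            // Re-read (and re-arm the watch); servers no longer listed
            // can be treated as expired.
            List<String> live = zk.getChildren(rsZnode, this);
            System.out.println("live region servers: " + live);
          } catch (KeeperException | InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }
      }
    };
    // The initial read installs the watch.
    zk.getChildren(rsZnode, watcher);
  }

  private RsEphemeralWatchSketch() {
  }
}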
2024-11-22T13:36:59,637 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282589729 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282589729,5,FailOnTimeoutGroup] 2024-11-22T13:36:59,637 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282589729 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282589729,5,FailOnTimeoutGroup] 2024-11-22T13:36:59,638 INFO [M:0;e025332d312f:35061 {}] hbase.ChoreService(370): Chore service for: master/e025332d312f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T13:36:59,638 INFO [M:0;e025332d312f:35061 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:36:59,638 DEBUG [M:0;e025332d312f:35061 {}] master.HMaster(1795): Stopping service threads 2024-11-22T13:36:59,639 INFO [M:0;e025332d312f:35061 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T13:36:59,639 INFO [M:0;e025332d312f:35061 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:36:59,639 INFO [M:0;e025332d312f:35061 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T13:36:59,639 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T13:36:59,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T13:36:59,646 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:36:59,646 DEBUG [M:0;e025332d312f:35061 {}] zookeeper.ZKUtil(347): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T13:36:59,646 WARN [M:0;e025332d312f:35061 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T13:36:59,647 INFO [M:0;e025332d312f:35061 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/.lastflushedseqids 2024-11-22T13:36:59,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741846_1030 (size=111) 2024-11-22T13:36:59,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741846_1030 (size=111) 2024-11-22T13:36:59,653 INFO [M:0;e025332d312f:35061 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T13:36:59,653 INFO [M:0;e025332d312f:35061 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T13:36:59,653 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:36:59,653 INFO [M:0;e025332d312f:35061 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:59,653 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:59,653 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:36:59,653 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:59,653 INFO [M:0;e025332d312f:35061 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-22T13:36:59,653 ERROR [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData-prefix:e025332d312f,35061,1732282589011 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:59,654 WARN [FSHLog-0-hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData-prefix:e025332d312f,35061,1732282589011 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
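The sequence that starts here (appendAndSync throws "All datanodes ... are bad", a roll is requested, a new writer comes up on a fresh pipeline, and the lease on the old file is then recovered) illustrates a general roll-on-failure shape. Below is a deliberately generic sketch of that idea; WalWriter and WalRoller are made-up abstractions for illustration, not HBase classes:

import java.io.IOException;

public final class RollOnAppendFailureSketch {

  interface WalWriter {
    void append(byte[] entry) throws IOException;
  }

  interface WalRoller {
    WalWriter roll() throws IOException; // returns a brand-new writer
  }

  /**
   * Appends to the current writer; if the append fails (for example because
   * the datanode pipeline is gone), rolls to a fresh writer and retries once.
   */
  static WalWriter appendOrRoll(WalWriter current, WalRoller roller, byte[] entry)
      throws IOException {
    try {
      current.append(entry);
      return current;
    } catch (IOException e) {
      // e.g. "All datanodes [...] are bad. Aborting..." -- the old pipeline
      // is unusable, so obtain a new writer and retry on it.
      WalWriter fresh = roller.roll();
      fresh.append(entry);
      return fresh;
    }
  }

  private RollOnAppendFailureSketch() {
  }
}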
2024-11-22T13:36:59,654 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog e025332d312f%2C35061%2C1732282589011:(num 1732282589324) roll requested 2024-11-22T13:36:59,654 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C35061%2C1732282589011.1732282619654 2024-11-22T13:36:59,659 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,659 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,659 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,659 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,659 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,660 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282619654 2024-11-22T13:36:59,660 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:59,660 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:41283,DS-7eae6ce6-0d80-4439-a03b-46c2b9af839b,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T13:36:59,660 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 2024-11-22T13:36:59,661 WARN [IPC Server handler 0 on default port 33787 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-22T13:36:59,661 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42243:42243),(127.0.0.1/127.0.0.1:35143:35143)] 2024-11-22T13:36:59,661 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 is not closed yet, will try archiving it next time 2024-11-22T13:36:59,661 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 after 1ms 2024-11-22T13:36:59,679 DEBUG [M:0;e025332d312f:35061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2f228ec21bd44e7a6896a7a1d77a9d4 is 82, key is hbase:meta,,1/info:regioninfo/1732282590451/Put/seqid=0 2024-11-22T13:36:59,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741848_1033 (size=5672) 2024-11-22T13:36:59,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741848_1033 (size=5672) 2024-11-22T13:36:59,684 INFO [M:0;e025332d312f:35061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2f228ec21bd44e7a6896a7a1d77a9d4 2024-11-22T13:36:59,705 DEBUG [M:0;e025332d312f:35061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3da1c99253840f499120ca746a5da35 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732282591027/Put/seqid=0 2024-11-22T13:36:59,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741849_1034 (size=6118) 2024-11-22T13:36:59,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741849_1034 (size=6118) 2024-11-22T13:36:59,711 INFO [M:0;e025332d312f:35061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3da1c99253840f499120ca746a5da35 2024-11-22T13:36:59,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:36:59,726 INFO [RS:0;e025332d312f:43097 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:36:59,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43097-0x10162c29aed0001, 
quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:36:59,726 INFO [RS:0;e025332d312f:43097 {}] regionserver.HRegionServer(1031): Exiting; stopping=e025332d312f,43097,1732282589184; zookeeper connection closed. 2024-11-22T13:36:59,726 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5988fa00 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5988fa00 2024-11-22T13:36:59,726 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T13:36:59,728 DEBUG [M:0;e025332d312f:35061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ad7acdad39194a28aa243e67e4647b92 is 69, key is e025332d312f,43097,1732282589184/rs:state/1732282589860/Put/seqid=0 2024-11-22T13:36:59,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741850_1035 (size=5156) 2024-11-22T13:36:59,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741850_1035 (size=5156) 2024-11-22T13:36:59,733 INFO [M:0;e025332d312f:35061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ad7acdad39194a28aa243e67e4647b92 2024-11-22T13:36:59,750 DEBUG [M:0;e025332d312f:35061 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0dd9ddd4cce74a26b24c6d27c7a3ca7e is 52, key is load_balancer_on/state:d/1732282590631/Put/seqid=0 2024-11-22T13:36:59,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741851_1036 (size=5056) 2024-11-22T13:36:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741851_1036 (size=5056) 2024-11-22T13:36:59,756 INFO [M:0;e025332d312f:35061 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0dd9ddd4cce74a26b24c6d27c7a3ca7e 2024-11-22T13:36:59,761 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b2f228ec21bd44e7a6896a7a1d77a9d4 as hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b2f228ec21bd44e7a6896a7a1d77a9d4 2024-11-22T13:36:59,763 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 
java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:59,766 INFO [M:0;e025332d312f:35061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b2f228ec21bd44e7a6896a7a1d77a9d4, entries=8, sequenceid=56, filesize=5.5 K 2024-11-22T13:36:59,767 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c3da1c99253840f499120ca746a5da35 as hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c3da1c99253840f499120ca746a5da35 2024-11-22T13:36:59,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:36:59,772 INFO [M:0;e025332d312f:35061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c3da1c99253840f499120ca746a5da35, entries=6, sequenceid=56, filesize=6.0 K 2024-11-22T13:36:59,773 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ad7acdad39194a28aa243e67e4647b92 as hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ad7acdad39194a28aa243e67e4647b92 2024-11-22T13:36:59,779 INFO [M:0;e025332d312f:35061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ad7acdad39194a28aa243e67e4647b92, entries=1, sequenceid=56, filesize=5.0 K 2024-11-22T13:36:59,780 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/0dd9ddd4cce74a26b24c6d27c7a3ca7e as hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0dd9ddd4cce74a26b24c6d27c7a3ca7e 2024-11-22T13:36:59,785 INFO [M:0;e025332d312f:35061 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/0dd9ddd4cce74a26b24c6d27c7a3ca7e, entries=1, sequenceid=56, filesize=4.9 K 2024-11-22T13:36:59,786 INFO [M:0;e025332d312f:35061 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=56, compaction requested=false 2024-11-22T13:36:59,788 INFO [M:0;e025332d312f:35061 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:36:59,788 DEBUG [M:0;e025332d312f:35061 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282619653Disabling compacts and flushes for region at 1732282619653Disabling writes for close at 1732282619653Obtaining lock to block concurrent updates at 1732282619653Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732282619653Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732282619653Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732282619661 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732282619661Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732282619678 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732282619678Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732282619690 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732282619705 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732282619705Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732282619715 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732282619728 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732282619728Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732282619737 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732282619750 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732282619750Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@78676127: reopening flushed file at 1732282619760 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72879fc0: reopening flushed file at 1732282619766 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cb394e4: reopening flushed file at 1732282619772 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d2eeb34: reopening flushed file at 1732282619779 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=56, compaction requested=false at 1732282619786 (+7 ms)Writing region close event to WAL at 1732282619788 (+2 ms)Closed at 1732282619788 2024-11-22T13:36:59,788 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,788 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,788 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,789 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
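The flush entries above follow a write-then-commit shape: each column family's data is first written to a file under the store's .tmp directory and only then moved ("Committing ... as ...") into the store directory, so readers never observe a partially written file. A minimal illustration of that pattern with plain FileSystem calls (directory layout and names are invented for the example):

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpThenCommitSketch {

  /**
   * Writes bytes to a temporary file under ".tmp" and then renames it into
   * the final store directory, the same write-then-commit shape as the
   * flush/commit log entries above.
   */
  static Path flushAndCommit(FileSystem fs, Path storeDir, byte[] data)
      throws IOException {
    Path tmp = new Path(storeDir, ".tmp/" + UUID.randomUUID());
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.write(data);
    }
    Path committed = new Path(storeDir, tmp.getName());
    if (!fs.rename(tmp, committed)) {
      throw new IOException("Failed to commit " + tmp + " as " + committed);
    }
    return committed;
  }

  private TmpThenCommitSketch() {
  }
}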
2024-11-22T13:36:59,789 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:36:59,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38077 is added to blk_1073741847_1031 (size=757) 2024-11-22T13:36:59,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34637 is added to blk_1073741847_1031 (size=757) 2024-11-22T13:37:00,764 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:00,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:00,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,832 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:00,997 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T13:37:01,335 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T13:37:01,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,340 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,360 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,361 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,365 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,367 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:01,765 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:01,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:02,767 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:02,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:03,663 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 after 4002ms 2024-11-22T13:37:03,664 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/WALs/e025332d312f,35061,1732282589011/e025332d312f%2C35061%2C1732282589011.1732282589324 to hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/oldWALs/e025332d312f%2C35061%2C1732282589011.1732282589324 2024-11-22T13:37:03,669 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/MasterData/oldWALs/e025332d312f%2C35061%2C1732282589011.1732282589324 to hdfs://localhost:33787/user/jenkins/test-data/1df5d970-9d71-f92c-2064-f5ed481f2ce7/oldWALs/e025332d312f%2C35061%2C1732282589011.1732282589324$masterlocalwal$ 2024-11-22T13:37:03,670 INFO [M:0;e025332d312f:35061 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T13:37:03,670 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T13:37:03,670 INFO [M:0;e025332d312f:35061 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35061 2024-11-22T13:37:03,670 INFO [M:0;e025332d312f:35061 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:37:03,768 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:03,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:03,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:37:03,821 INFO [M:0;e025332d312f:35061 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:37:03,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35061-0x10162c29aed0000, quorum=127.0.0.1:61784, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:37:03,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c7562d5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:37:03,828 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3de0aa3a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:37:03,829 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:37:03,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5d4a7998{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:37:03,829 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a260e23{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,STOPPED} 2024-11-22T13:37:03,832 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:37:03,833 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:37:03,833 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891882766-172.17.0.2-1732282586777 (Datanode Uuid c0dd6e3b-3203-46b9-b858-186208f131df) service to localhost/127.0.0.1:33787 2024-11-22T13:37:03,833 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:37:03,833 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data3/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:37:03,834 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data4/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:37:03,834 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:37:03,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@160f7ba9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:37:03,836 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3df3f65e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:37:03,836 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:37:03,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6023e2fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:37:03,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10b7c71{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,STOPPED} 2024-11-22T13:37:03,837 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:37:03,837 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:37:03,837 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:37:03,837 WARN [BP-1891882766-172.17.0.2-1732282586777 heartbeating to localhost/127.0.0.1:33787 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1891882766-172.17.0.2-1732282586777 (Datanode Uuid b4ffa5e1-70a8-4e25-bc37-c92e218af6d5) service to localhost/127.0.0.1:33787 2024-11-22T13:37:03,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data1/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:37:03,837 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/cluster_f9370214-6bc1-1414-60f0-399df91e04ee/data/data2/current/BP-1891882766-172.17.0.2-1732282586777 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:37:03,837 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:37:03,842 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fc8bed8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:37:03,842 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42aa99e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:37:03,843 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:37:03,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2835f29c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:37:03,843 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@140caf6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir/,STOPPED} 2024-11-22T13:37:03,849 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T13:37:03,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T13:37:03,874 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=182 (was 157) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33787 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33787 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:33787 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33787 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33787 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33787 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:33787 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33787 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=71 (was 64) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2399 (was 2550) 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=182, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=71, ProcessCount=11, AvailableMemoryMB=2399 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.log.dir so I do NOT create it in target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/1f03fd53-708f-9a7e-a32f-5b59aaa27d58/hadoop.tmp.dir so I do NOT create it in target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc, deleteOnExit=true 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/test.cache.data in system properties and HBase conf 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T13:37:03,881 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.log.dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T13:37:03,882 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:37:03,882 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:37:03,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T13:37:03,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/nfs.dump.dir in system properties and HBase conf 2024-11-22T13:37:03,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/java.io.tmpdir in system properties and HBase conf 2024-11-22T13:37:03,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:37:03,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T13:37:03,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T13:37:03,896 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:37:04,264 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:37:04,268 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:37:04,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:37:04,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:37:04,277 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:37:04,280 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:37:04,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6826318a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:37:04,281 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18701e65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:37:04,373 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@678c2527{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/java.io.tmpdir/jetty-localhost-37265-hadoop-hdfs-3_4_1-tests_jar-_-any-8810391437982157567/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:37:04,373 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@191911fe{HTTP/1.1, (http/1.1)}{localhost:37265} 2024-11-22T13:37:04,373 INFO [Time-limited test {}] server.Server(415): Started @193553ms 2024-11-22T13:37:04,384 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:37:04,666 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:37:04,669 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:37:04,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:37:04,670 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:37:04,670 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:37:04,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ec76923{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:37:04,670 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f411ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:37:04,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c6abbb8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/java.io.tmpdir/jetty-localhost-45239-hadoop-hdfs-3_4_1-tests_jar-_-any-7402224796228211032/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:37:04,765 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@555a4a92{HTTP/1.1, (http/1.1)}{localhost:45239} 2024-11-22T13:37:04,765 INFO [Time-limited test {}] server.Server(415): Started @193944ms 2024-11-22T13:37:04,766 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:37:04,769 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:04,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:04,790 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:37:04,793 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:37:04,794 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:37:04,794 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:37:04,794 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:37:04,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bc5f936{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:37:04,795 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2972d60a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:37:04,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4fb491f6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/java.io.tmpdir/jetty-localhost-41017-hadoop-hdfs-3_4_1-tests_jar-_-any-3793087610418344194/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:37:04,887 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@16217bcd{HTTP/1.1, (http/1.1)}{localhost:41017} 2024-11-22T13:37:04,888 INFO [Time-limited test {}] server.Server(415): Started @194067ms 2024-11-22T13:37:04,889 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:37:05,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:05,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:06,078 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/data/data1/current/BP-844324917-172.17.0.2-1732282623906/current, will proceed with Du for space computation calculation, 2024-11-22T13:37:06,078 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/data/data2/current/BP-844324917-172.17.0.2-1732282623906/current, will proceed with Du for space computation calculation, 2024-11-22T13:37:06,094 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T13:37:06,096 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4852e44be4574d8 with lease ID 0x31eda466f1235f75: Processing first storage report for DS-8ffb14eb-8575-432a-9756-8fe67cf6323c from datanode DatanodeRegistration(127.0.0.1:44673, datanodeUuid=2ef7c1a7-3aee-451a-bb94-8d270a8f8743, infoPort=41081, infoSecurePort=0, ipcPort=45955, storageInfo=lv=-57;cid=testClusterID;nsid=1306028509;c=1732282623906) 2024-11-22T13:37:06,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4852e44be4574d8 with lease ID 0x31eda466f1235f75: from storage DS-8ffb14eb-8575-432a-9756-8fe67cf6323c node DatanodeRegistration(127.0.0.1:44673, datanodeUuid=2ef7c1a7-3aee-451a-bb94-8d270a8f8743, infoPort=41081, infoSecurePort=0, ipcPort=45955, storageInfo=lv=-57;cid=testClusterID;nsid=1306028509;c=1732282623906), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:37:06,097 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4852e44be4574d8 with lease ID 0x31eda466f1235f75: Processing first storage report for DS-8ebadc15-6370-4e56-b621-a0066abb27f1 from datanode DatanodeRegistration(127.0.0.1:44673, datanodeUuid=2ef7c1a7-3aee-451a-bb94-8d270a8f8743, infoPort=41081, infoSecurePort=0, ipcPort=45955, storageInfo=lv=-57;cid=testClusterID;nsid=1306028509;c=1732282623906) 2024-11-22T13:37:06,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4852e44be4574d8 with lease ID 0x31eda466f1235f75: from storage DS-8ebadc15-6370-4e56-b621-a0066abb27f1 node DatanodeRegistration(127.0.0.1:44673, datanodeUuid=2ef7c1a7-3aee-451a-bb94-8d270a8f8743, infoPort=41081, infoSecurePort=0, ipcPort=45955, storageInfo=lv=-57;cid=testClusterID;nsid=1306028509;c=1732282623906), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:37:06,289 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/data/data3/current/BP-844324917-172.17.0.2-1732282623906/current, will proceed with Du for space computation calculation, 2024-11-22T13:37:06,289 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/data/data4/current/BP-844324917-172.17.0.2-1732282623906/current, will proceed with Du for space computation calculation, 2024-11-22T13:37:06,308 WARN [Thread-1641 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T13:37:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb71a2b89dff0189 with lease ID 0x31eda466f1235f76: Processing first storage report for DS-c8300cd4-cbb0-4414-a7f9-853e4fd6d54a from datanode DatanodeRegistration(127.0.0.1:36567, datanodeUuid=3c91a1c1-40f6-41a1-b2a0-b21ac4950544, infoPort=43847, infoSecurePort=0, ipcPort=41979, storageInfo=lv=-57;cid=testClusterID;nsid=1306028509;c=1732282623906) 2024-11-22T13:37:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb71a2b89dff0189 with lease ID 0x31eda466f1235f76: from storage DS-c8300cd4-cbb0-4414-a7f9-853e4fd6d54a node DatanodeRegistration(127.0.0.1:36567, datanodeUuid=3c91a1c1-40f6-41a1-b2a0-b21ac4950544, infoPort=43847, infoSecurePort=0, ipcPort=41979, storageInfo=lv=-57;cid=testClusterID;nsid=1306028509;c=1732282623906), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:37:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbb71a2b89dff0189 with lease ID 0x31eda466f1235f76: Processing first storage report for DS-58114a26-267f-4996-abe5-b70a6eca2eb8 from datanode DatanodeRegistration(127.0.0.1:36567, datanodeUuid=3c91a1c1-40f6-41a1-b2a0-b21ac4950544, infoPort=43847, infoSecurePort=0, ipcPort=41979, storageInfo=lv=-57;cid=testClusterID;nsid=1306028509;c=1732282623906) 2024-11-22T13:37:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbb71a2b89dff0189 with lease ID 0x31eda466f1235f76: from storage DS-58114a26-267f-4996-abe5-b70a6eca2eb8 node DatanodeRegistration(127.0.0.1:36567, datanodeUuid=3c91a1c1-40f6-41a1-b2a0-b21ac4950544, infoPort=43847, infoSecurePort=0, ipcPort=41979, storageInfo=lv=-57;cid=testClusterID;nsid=1306028509;c=1732282623906), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:37:06,323 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9 2024-11-22T13:37:06,326 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/zookeeper_0, clientPort=64936, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T13:37:06,327 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64936 2024-11-22T13:37:06,327 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:37:06,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:37:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:37:06,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:37:06,341 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7 with version=8 2024-11-22T13:37:06,341 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase-staging 2024-11-22T13:37:06,343 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:37:06,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:37:06,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:37:06,343 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:37:06,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:37:06,343 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:37:06,343 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T13:37:06,344 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:37:06,344 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37969 2024-11-22T13:37:06,346 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37969 connecting to ZooKeeper ensemble=127.0.0.1:64936 2024-11-22T13:37:06,402 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379690x0, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-22T13:37:06,403 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37969-0x10162c32cc00000 connected 2024-11-22T13:37:06,584 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:37:06,587 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:37:06,591 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:37:06,592 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7, hbase.cluster.distributed=false 2024-11-22T13:37:06,595 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:37:06,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37969 2024-11-22T13:37:06,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37969 2024-11-22T13:37:06,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37969 2024-11-22T13:37:06,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37969 2024-11-22T13:37:06,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37969 2024-11-22T13:37:06,611 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:37:06,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:37:06,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:37:06,611 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:37:06,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:37:06,611 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:37:06,611 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T13:37:06,611 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:37:06,612 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44147 2024-11-22T13:37:06,614 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44147 connecting to ZooKeeper ensemble=127.0.0.1:64936 2024-11-22T13:37:06,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:37:06,616 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:37:06,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:441470x0, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:37:06,625 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44147-0x10162c32cc00001 connected 2024-11-22T13:37:06,625 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:37:06,625 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T13:37:06,626 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T13:37:06,626 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T13:37:06,627 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:37:06,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44147 2024-11-22T13:37:06,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44147 2024-11-22T13:37:06,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44147 2024-11-22T13:37:06,629 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44147 2024-11-22T13:37:06,629 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44147 2024-11-22T13:37:06,644 DEBUG [M:0;e025332d312f:37969 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e025332d312f:37969 2024-11-22T13:37:06,644 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e025332d312f,37969,1732282626343 2024-11-22T13:37:06,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:37:06,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:37:06,657 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e025332d312f,37969,1732282626343 2024-11-22T13:37:06,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T13:37:06,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,668 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T13:37:06,668 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e025332d312f,37969,1732282626343 from backup master directory 2024-11-22T13:37:06,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e025332d312f,37969,1732282626343 2024-11-22T13:37:06,677 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:37:06,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:37:06,678 WARN [master/e025332d312f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
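The ZooKeeper traffic above shows the master registering itself under /hbase/backup-masters and setting watches on znodes that do not exist yet (/hbase/running, /hbase/master, /hbase/acl), then reacting to the resulting NodeCreated/NodeDeleted/NodeChildrenChanged events. A minimal sketch of that watch-before-create pattern with the plain ZooKeeper client follows; the quorum address and znode paths are taken from the log, the server name in the ephemeral node is hypothetical, and this is an illustration rather than HBase's ActiveMasterManager code.

import org.apache.zookeeper.*;
import org.apache.zookeeper.data.Stat;

public class WatchBeforeCreate {
    public static void main(String[] args) throws Exception {
        // Connect to the mini quorum from the log; 30000 ms matches the connectionTimeout above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64936", 30000, event -> {});

        // Setting a watch on a znode that does not yet exist: exists() returns null
        // but still registers the watcher, so a later create fires a NodeCreated event.
        Stat stat = zk.exists("/hbase/running", event -> {
            if (event.getType() == Watcher.Event.EventType.NodeCreated) {
                System.out.println("cluster is now up: " + event.getPath());
            }
        });
        System.out.println("/hbase/running exists yet? " + (stat != null));

        // Register under /hbase/backup-masters with an EPHEMERAL node so the entry
        // disappears automatically if this session dies (server name below is made up).
        zk.create("/hbase/backup-masters/example-master,37969,1732282626343", new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    }
}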
2024-11-22T13:37:06,678 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e025332d312f,37969,1732282626343 2024-11-22T13:37:06,686 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/hbase.id] with ID: d23bbc96-9cdc-42fd-b9ef-0c389b6dfc8f 2024-11-22T13:37:06,686 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/.tmp/hbase.id 2024-11-22T13:37:06,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:37:06,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:37:06,693 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/.tmp/hbase.id]:[hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/hbase.id] 2024-11-22T13:37:06,703 INFO [master/e025332d312f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:37:06,703 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T13:37:06,705 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
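The cluster ID handling above writes hbase.id to a temporary path under .tmp and then moves it into its final location, so a reader never observes a partially written file. A small sketch of that write-then-rename idiom with the Hadoop FileSystem API; the directory layout here is a placeholder and this is not the FSUtils code itself.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteClusterId {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed HDFS URI for illustration; the test above talks to hdfs://localhost:45327.
        FileSystem fs = FileSystem.get(new java.net.URI("hdfs://localhost:45327"), conf);

        Path tmp = new Path("/user/jenkins/test-data/example/.tmp/hbase.id");
        Path dst = new Path("/user/jenkins/test-data/example/hbase.id");

        // Write the content to the temporary file first...
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("d23bbc96-9cdc-42fd-b9ef-0c389b6dfc8f".getBytes(StandardCharsets.UTF_8));
        }
        // ...then publish it with a single rename, a metadata-only operation on the NameNode.
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
    }
}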
2024-11-22T13:37:06,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:37:06,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:37:06,725 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:37:06,726 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T13:37:06,726 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:37:06,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:37:06,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:37:06,734 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store 2024-11-22T13:37:06,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:37:06,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:37:06,740 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:37:06,741 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:37:06,741 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:37:06,741 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:37:06,741 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:37:06,741 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:37:06,741 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
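The master:store table descriptor above declares four column families (info, proc, rs, state) with per-family settings such as VERSIONS, IN_MEMORY, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE. For orientation, a descriptor of that shape could be assembled with the public HBase client builders roughly as follows; only the info family is shown, and the exact setter list is an approximation rather than the code the master runs.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                                // BLOCKSIZE => 8KB
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .build())
            .build();
    }
}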
2024-11-22T13:37:06,741 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282626741Disabling compacts and flushes for region at 1732282626741Disabling writes for close at 1732282626741Writing region close event to WAL at 1732282626741Closed at 1732282626741 2024-11-22T13:37:06,742 WARN [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/.initializing 2024-11-22T13:37:06,742 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/WALs/e025332d312f,37969,1732282626343 2024-11-22T13:37:06,744 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C37969%2C1732282626343, suffix=, logDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/WALs/e025332d312f,37969,1732282626343, archiveDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/oldWALs, maxLogs=10 2024-11-22T13:37:06,744 INFO [master/e025332d312f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C37969%2C1732282626343.1732282626744 2024-11-22T13:37:06,749 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/WALs/e025332d312f,37969,1732282626343/e025332d312f%2C37969%2C1732282626343.1732282626744 2024-11-22T13:37:06,750 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43847:43847),(127.0.0.1/127.0.0.1:41081:41081)] 2024-11-22T13:37:06,750 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:37:06,751 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:37:06,751 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,751 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,752 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,753 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T13:37:06,753 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,754 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:37:06,754 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,755 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T13:37:06,755 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,755 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:37:06,755 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,756 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T13:37:06,756 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,757 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:37:06,757 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T13:37:06,758 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,758 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:37:06,758 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,759 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,759 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,761 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,761 DEBUG [master/e025332d312f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,762 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T13:37:06,763 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:37:06,765 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:37:06,765 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787466, jitterRate=0.0013159513473510742}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T13:37:06,766 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732282626751Initializing all the Stores at 1732282626752 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282626752Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282626752Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282626752Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282626752Cleaning up temporary data from old regions at 1732282626761 (+9 ms)Region opened successfully at 1732282626766 (+5 ms) 2024-11-22T13:37:06,767 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T13:37:06,770 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5429b96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0
2024-11-22T13:37:06,770 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating...
2024-11-22T13:37:06,770 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5
2024-11-22T13:37:06,771 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50
2024-11-22T13:37:06,770 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ... 11 more
2024-11-22T13:37:06,771 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery...
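The WARN above comes from lease recovery on a WAL left over from an earlier mini-cluster in the same test JVM (note the different port, 46035): RecoverLeaseFSUtils reflectively calls DistributedFileSystem.isFileClosed, and the call fails here only because that DFSClient has already been shut down ("Filesystem closed"). The recover-then-poll idea behind it can be sketched directly against the public HDFS client API; this is a simplified illustration, not the HBase utility, which adds timeouts, backoff and the reflection visible in the trace.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RecoverWalLeaseSketch {
    // Ask the NameNode to recover the lease on a WAL file and wait until it is closed.
    static void recoverLease(Configuration conf, Path wal) throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
            return; // lease recovery only applies to HDFS
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean recovered = dfs.recoverLease(wal);   // true if the file is already closed
        while (!recovered && !dfs.isFileClosed(wal)) {
            Thread.sleep(1000);                      // poll until the last block is finalized
            recovered = dfs.recoverLease(wal);
        }
    }
}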
2024-11-22T13:37:06,771 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec
2024-11-22T13:37:06,771 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec
2024-11-22T13:37:06,771 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150
2024-11-22T13:37:06,773 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'.
2024-11-22T13:37:06,774 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-22T13:37:06,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta
java.lang.reflect.InvocationTargetException: null
  at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
  at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
  at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
  at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
  at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
  at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
  at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
  ...
11 more 2024-11-22T13:37:06,782 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T13:37:06,783 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T13:37:06,784 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T13:37:06,793 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T13:37:06,793 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T13:37:06,795 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T13:37:06,803 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T13:37:06,805 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T13:37:06,814 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T13:37:06,816 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T13:37:06,825 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T13:37:06,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:37:06,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:37:06,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,835 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,836 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e025332d312f,37969,1732282626343, sessionid=0x10162c32cc00000, setting cluster-up flag (Was=false) 2024-11-22T13:37:06,856 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,888 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T13:37:06,889 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,37969,1732282626343 2024-11-22T13:37:06,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:06,941 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T13:37:06,943 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,37969,1732282626343 2024-11-22T13:37:06,945 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T13:37:06,947 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T13:37:06,948 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T13:37:06,948 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
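The balancer line above lists the StochasticLoadBalancer's cost functions and notes that the sum of their multipliers is 0.0 in this test configuration. Conceptually the balancer scores a candidate assignment as a weighted combination of per-function costs; the fragment below illustrates that idea with hypothetical types and is not the internal CostFunction API.

import java.util.List;
import java.util.function.ToDoubleFunction;

public class WeightedCostSketch {
    // Hypothetical pairing of a scaled cost in [0,1] with its configured multiplier.
    record WeightedCost(double multiplier, ToDoubleFunction<int[]> cost) {}

    // Score = weighted sum of the scaled costs, normalized by the total weight.
    static double score(List<WeightedCost> functions, int[] regionsPerServer) {
        double weighted = 0, weights = 0;
        for (WeightedCost f : functions) {
            weighted += f.multiplier() * f.cost().applyAsDouble(regionsPerServer);
            weights += f.multiplier();
        }
        // If the multipliers sum to 0 (as reported in the log above) there is nothing to weigh.
        return weights == 0 ? 0 : weighted / weights;
    }
}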
2024-11-22T13:37:06,948 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e025332d312f,37969,1732282626343 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T13:37:06,951 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:37:06,951 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:37:06,951 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:37:06,951 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:37:06,952 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e025332d312f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T13:37:06,952 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:06,952 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:37:06,952 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:06,953 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732282656953 2024-11-22T13:37:06,953 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T13:37:06,953 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T13:37:06,953 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T13:37:06,953 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T13:37:06,953 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T13:37:06,953 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T13:37:06,953 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:06,954 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:37:06,954 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T13:37:06,954 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T13:37:06,954 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T13:37:06,954 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T13:37:06,954 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T13:37:06,954 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T13:37:06,954 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282626954,5,FailOnTimeoutGroup] 2024-11-22T13:37:06,955 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282626955,5,FailOnTimeoutGroup] 2024-11-22T13:37:06,955 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:06,955 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T13:37:06,955 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:06,955 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
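The chore registrations above (LogsCleaner and HFileCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms) are periodic background tasks. A rough stand-in for that scheduling pattern using only the JDK scheduler is shown below; HBase's ChoreService adds its own bookkeeping on top of a scheduled pool, so treat this as an illustration of the pattern only.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

        // period=600000 ms, unit=MILLISECONDS, as in the LogsCleaner line above
        pool.scheduleAtFixedRate(
            () -> System.out.println("cleaning old WALs..."),
            0, 600_000, TimeUnit.MILLISECONDS);

        // period=1800000 ms for a snapshot-cleaner-like task
        pool.scheduleAtFixedRate(
            () -> System.out.println("cleaning expired snapshots..."),
            0, 1_800_000, TimeUnit.MILLISECONDS);
    }
}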
2024-11-22T13:37:06,955 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,955 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T13:37:06,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:37:06,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:37:06,964 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T13:37:06,964 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7 2024-11-22T13:37:06,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:37:06,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:37:06,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:37:06,973 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:37:06,975 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:37:06,975 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:37:06,976 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:37:06,977 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:37:06,977 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:37:06,978 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:37:06,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:37:06,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:37:06,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:37:06,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:37:06,982 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:06,983 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:37:06,983 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:37:06,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740 2024-11-22T13:37:06,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740 2024-11-22T13:37:06,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:37:06,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:37:06,986 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:37:06,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:37:06,990 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:37:06,991 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=734045, jitterRate=-0.0666140615940094}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:37:06,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732282626972Initializing all the Stores at 1732282626973 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282626973Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282626973Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282626973Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING 
=> 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282626973Cleaning up temporary data from old regions at 1732282626986 (+13 ms)Region opened successfully at 1732282626992 (+6 ms) 2024-11-22T13:37:06,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:37:06,993 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:37:06,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:37:06,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:37:06,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:37:06,993 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:37:06,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282626992Disabling compacts and flushes for region at 1732282626992Disabling writes for close at 1732282626993 (+1 ms)Writing region close event to WAL at 1732282626993Closed at 1732282626993 2024-11-22T13:37:06,995 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:37:06,995 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T13:37:06,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T13:37:06,997 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:37:06,998 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T13:37:07,031 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(746): ClusterId : d23bbc96-9cdc-42fd-b9ef-0c389b6dfc8f 2024-11-22T13:37:07,031 DEBUG [RS:0;e025332d312f:44147 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T13:37:07,044 DEBUG [RS:0;e025332d312f:44147 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T13:37:07,044 DEBUG [RS:0;e025332d312f:44147 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T13:37:07,058 DEBUG [RS:0;e025332d312f:44147 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T13:37:07,058 DEBUG [RS:0;e025332d312f:44147 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787d8318, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:37:07,073 DEBUG [RS:0;e025332d312f:44147 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e025332d312f:44147 2024-11-22T13:37:07,073 INFO [RS:0;e025332d312f:44147 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T13:37:07,073 INFO [RS:0;e025332d312f:44147 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T13:37:07,073 DEBUG [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-22T13:37:07,074 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(2659): reportForDuty to master=e025332d312f,37969,1732282626343 with port=44147, startcode=1732282626611 2024-11-22T13:37:07,074 DEBUG [RS:0;e025332d312f:44147 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T13:37:07,076 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60617, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T13:37:07,076 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37969 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e025332d312f,44147,1732282626611 2024-11-22T13:37:07,076 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37969 {}] master.ServerManager(517): Registering regionserver=e025332d312f,44147,1732282626611 2024-11-22T13:37:07,078 DEBUG [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7 2024-11-22T13:37:07,078 DEBUG [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45327 2024-11-22T13:37:07,078 DEBUG [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T13:37:07,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:37:07,088 DEBUG [RS:0;e025332d312f:44147 {}] zookeeper.ZKUtil(111): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e025332d312f,44147,1732282626611 2024-11-22T13:37:07,088 WARN [RS:0;e025332d312f:44147 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
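[Editor's note] The regionserver registration above hinges on an ephemeral znode under /hbase/rs that the master's RegionServerTracker watches. A hedged sketch of that pattern follows, written against the plain Apache ZooKeeper client rather than HBase's ZKWatcher/ZKUtil; the quorum address, parent path, and server name are copied from the log, the rest (session timeout, empty payload) is assumed.

```java
import java.nio.charset.StandardCharsets;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: ephemeral registration under /hbase/rs with the plain
// ZooKeeper client, NOT HBase's ZKWatcher/ZKUtil internals.
public final class RsZnodeSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64936", 30_000, event -> {});
        String znode = "/hbase/rs/e025332d312f,44147,1732282626611";
        // Ephemeral: the node vanishes when the session dies, which is what lets
        // the master notice a crashed regionserver.
        zk.create(znode, "".getBytes(StandardCharsets.UTF_8),
                  ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        // Watching the parent's children is how a tracker sees the
        // NodeChildrenChanged event logged above for path=/hbase/rs.
        System.out.println(zk.getChildren("/hbase/rs", true));
    }
}
```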
2024-11-22T13:37:07,088 INFO [RS:0;e025332d312f:44147 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:37:07,088 DEBUG [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611 2024-11-22T13:37:07,088 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e025332d312f,44147,1732282626611] 2024-11-22T13:37:07,091 INFO [RS:0;e025332d312f:44147 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T13:37:07,093 INFO [RS:0;e025332d312f:44147 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T13:37:07,094 INFO [RS:0;e025332d312f:44147 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T13:37:07,094 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,094 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T13:37:07,095 INFO [RS:0;e025332d312f:44147 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T13:37:07,095 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,095 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,095 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,095 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,095 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,095 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e025332d312f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:37:07,096 DEBUG [RS:0;e025332d312f:44147 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:37:07,097 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,097 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,097 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,097 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,097 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,097 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,44147,1732282626611-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:37:07,113 INFO [RS:0;e025332d312f:44147 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T13:37:07,113 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,44147,1732282626611-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,113 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,113 INFO [RS:0;e025332d312f:44147 {}] regionserver.Replication(171): e025332d312f,44147,1732282626611 started 2024-11-22T13:37:07,125 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
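[Editor's note] The executor.ExecutorService entries above start one bounded, named thread pool per handler type (RS_OPEN_REGION with corePoolSize=1, RS_SNAPSHOT_OPERATIONS with corePoolSize=3, and so on). The sketch below shows that per-event-type pool pattern with the JDK's ThreadPoolExecutor; it is not HBase's executor service, and the pool sizes are simply read off the log.

```java
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch of the named per-handler-type pool pattern visible in the log above.
public final class HandlerPoolSketch {
    static ThreadPoolExecutor newHandlerPool(String name, int coreSize, int maxSize) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            coreSize, maxSize, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            r -> new Thread(r, name + "-handler"));
        pool.allowCoreThreadTimeOut(true); // idle handlers may exit on a quiet server
        return pool;
    }

    public static void main(String[] args) {
        ThreadPoolExecutor openRegion = newHandlerPool("RS_OPEN_REGION", 1, 1);
        ThreadPoolExecutor snapshotOps = newHandlerPool("RS_SNAPSHOT_OPERATIONS", 3, 3);
        openRegion.execute(() -> System.out.println("open region task"));
        snapshotOps.execute(() -> System.out.println("snapshot task"));
        openRegion.shutdown();
        snapshotOps.shutdown();
    }
}
```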
2024-11-22T13:37:07,125 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1482): Serving as e025332d312f,44147,1732282626611, RpcServer on e025332d312f/172.17.0.2:44147, sessionid=0x10162c32cc00001 2024-11-22T13:37:07,125 DEBUG [RS:0;e025332d312f:44147 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T13:37:07,125 DEBUG [RS:0;e025332d312f:44147 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e025332d312f,44147,1732282626611 2024-11-22T13:37:07,125 DEBUG [RS:0;e025332d312f:44147 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,44147,1732282626611' 2024-11-22T13:37:07,125 DEBUG [RS:0;e025332d312f:44147 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T13:37:07,126 DEBUG [RS:0;e025332d312f:44147 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T13:37:07,126 DEBUG [RS:0;e025332d312f:44147 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T13:37:07,126 DEBUG [RS:0;e025332d312f:44147 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T13:37:07,126 DEBUG [RS:0;e025332d312f:44147 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e025332d312f,44147,1732282626611 2024-11-22T13:37:07,126 DEBUG [RS:0;e025332d312f:44147 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,44147,1732282626611' 2024-11-22T13:37:07,126 DEBUG [RS:0;e025332d312f:44147 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T13:37:07,127 DEBUG [RS:0;e025332d312f:44147 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T13:37:07,127 DEBUG [RS:0;e025332d312f:44147 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T13:37:07,127 INFO [RS:0;e025332d312f:44147 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T13:37:07,127 INFO [RS:0;e025332d312f:44147 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T13:37:07,148 WARN [e025332d312f:37969 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-22T13:37:07,231 INFO [RS:0;e025332d312f:44147 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C44147%2C1732282626611, suffix=, logDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611, archiveDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/oldWALs, maxLogs=32 2024-11-22T13:37:07,233 INFO [RS:0;e025332d312f:44147 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C44147%2C1732282626611.1732282627232 2024-11-22T13:37:07,243 INFO [RS:0;e025332d312f:44147 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282627232 2024-11-22T13:37:07,244 DEBUG [RS:0;e025332d312f:44147 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41081:41081),(127.0.0.1/127.0.0.1:43847:43847)] 2024-11-22T13:37:07,399 DEBUG [e025332d312f:37969 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T13:37:07,400 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e025332d312f,44147,1732282626611 2024-11-22T13:37:07,404 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,44147,1732282626611, state=OPENING 2024-11-22T13:37:07,414 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T13:37:07,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:07,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:07,425 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:37:07,425 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:37:07,425 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,44147,1732282626611}] 2024-11-22T13:37:07,425 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:37:07,580 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T13:37:07,585 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56399, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T13:37:07,591 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T13:37:07,591 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:37:07,594 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C44147%2C1732282626611.meta, suffix=.meta, logDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611, archiveDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/oldWALs, maxLogs=32 2024-11-22T13:37:07,595 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C44147%2C1732282626611.meta.1732282627595.meta 2024-11-22T13:37:07,603 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.meta.1732282627595.meta 2024-11-22T13:37:07,604 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41081:41081),(127.0.0.1/127.0.0.1:43847:43847)] 2024-11-22T13:37:07,606 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:37:07,607 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T13:37:07,607 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T13:37:07,607 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T13:37:07,607 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T13:37:07,607 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:37:07,607 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T13:37:07,607 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T13:37:07,609 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:37:07,610 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:37:07,610 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:07,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:37:07,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:37:07,611 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:37:07,611 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:07,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:37:07,611 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:37:07,612 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:37:07,612 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:07,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:37:07,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:37:07,613 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:37:07,613 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:07,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T13:37:07,614 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:37:07,614 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740 2024-11-22T13:37:07,615 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740 2024-11-22T13:37:07,616 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:37:07,616 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:37:07,617 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:37:07,618 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:37:07,619 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=700361, jitterRate=-0.1094459593296051}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:37:07,619 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T13:37:07,620 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732282627608Writing region info on filesystem at 1732282627608Initializing all the Stores at 1732282627608Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282627609 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282627609Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282627609Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282627609Cleaning up temporary data from old regions at 1732282627617 (+8 ms)Running coprocessor post-open hooks at 1732282627619 (+2 ms)Region opened successfully at 1732282627620 (+1 ms) 2024-11-22T13:37:07,621 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732282627579 2024-11-22T13:37:07,623 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T13:37:07,623 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T13:37:07,624 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,44147,1732282626611 2024-11-22T13:37:07,625 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,44147,1732282626611, state=OPEN 2024-11-22T13:37:07,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:37:07,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:37:07,666 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e025332d312f,44147,1732282626611 2024-11-22T13:37:07,666 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:37:07,666 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:37:07,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T13:37:07,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,44147,1732282626611 in 241 msec 2024-11-22T13:37:07,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T13:37:07,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 674 msec 2024-11-22T13:37:07,673 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:37:07,673 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T13:37:07,674 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:37:07,674 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,44147,1732282626611, seqNum=-1] 2024-11-22T13:37:07,675 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:37:07,676 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53191, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:37:07,682 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 735 msec 2024-11-22T13:37:07,682 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732282627682, completionTime=-1 2024-11-22T13:37:07,682 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T13:37:07,682 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T13:37:07,684 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T13:37:07,684 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732282687684 2024-11-22T13:37:07,684 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732282747684 2024-11-22T13:37:07,684 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T13:37:07,684 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,37969,1732282626343-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,684 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,37969,1732282626343-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,684 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,37969,1732282626343-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,685 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e025332d312f:37969, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T13:37:07,685 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,685 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T13:37:07,686 DEBUG [master/e025332d312f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T13:37:07,688 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.010sec 2024-11-22T13:37:07,688 INFO [master/e025332d312f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T13:37:07,688 INFO [master/e025332d312f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T13:37:07,688 INFO [master/e025332d312f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T13:37:07,688 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T13:37:07,688 INFO [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T13:37:07,688 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,37969,1732282626343-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:37:07,688 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,37969,1732282626343-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T13:37:07,691 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T13:37:07,691 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T13:37:07,691 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,37969,1732282626343-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
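[Editor's note] At this point the master reports "Master has completed initialization", so the mini cluster is serving. As a hedged, client-side illustration (public API only, nothing taken from HBase internals), this is how a client could connect to such a cluster and confirm the active master and namespaces; the ZooKeeper quorum/port values are assumed to match the ones seen earlier in the log and to be reachable from the client.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative client-side check that a cluster like the one above is up.
public final class ClusterUpSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "64936"); // quorum port seen in the log
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // ClusterMetrics carries the active master and live regionserver list.
            System.out.println("active master: " + admin.getClusterMetrics().getMasterName());
            System.out.println("namespaces: " + admin.listNamespaceDescriptors().length);
        }
    }
}
```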
2024-11-22T13:37:07,732 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@520603a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:37:07,732 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e025332d312f,37969,-1 for getting cluster id 2024-11-22T13:37:07,732 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T13:37:07,734 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd23bbc96-9cdc-42fd-b9ef-0c389b6dfc8f' 2024-11-22T13:37:07,734 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T13:37:07,735 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d23bbc96-9cdc-42fd-b9ef-0c389b6dfc8f" 2024-11-22T13:37:07,735 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13af4191, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:37:07,735 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e025332d312f,37969,-1] 2024-11-22T13:37:07,735 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T13:37:07,736 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:37:07,738 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50360, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T13:37:07,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7113b5b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:37:07,740 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:37:07,741 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,44147,1732282626611, seqNum=-1] 2024-11-22T13:37:07,742 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:37:07,743 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39940, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:37:07,746 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e025332d312f,37969,1732282626343 2024-11-22T13:37:07,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:37:07,749 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T13:37:07,750 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T13:37:07,751 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is e025332d312f,37969,1732282626343 2024-11-22T13:37:07,751 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@9c84136 2024-11-22T13:37:07,751 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T13:37:07,752 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50362, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T13:37:07,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T13:37:07,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-22T13:37:07,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:37:07,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T13:37:07,756 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T13:37:07,756 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:07,756 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-22T13:37:07,757 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T13:37:07,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T13:37:07,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741835_1011 (size=405) 2024-11-22T13:37:07,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741835_1011 (size=405) 2024-11-22T13:37:07,767 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8e600c4873846f232a98bdda07155394, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7 2024-11-22T13:37:07,771 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:07,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741836_1012 (size=88) 2024-11-22T13:37:07,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741836_1012 (size=88) 2024-11-22T13:37:07,773 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:37:07,774 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 8e600c4873846f232a98bdda07155394, disabling compactions & flushes 2024-11-22T13:37:07,774 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:07,774 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:07,774 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. after waiting 0 ms 2024-11-22T13:37:07,774 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:07,774 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 
2024-11-22T13:37:07,774 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8e600c4873846f232a98bdda07155394: Waiting for close lock at 1732282627774Disabling compacts and flushes for region at 1732282627774Disabling writes for close at 1732282627774Writing region close event to WAL at 1732282627774Closed at 1732282627774 2024-11-22T13:37:07,775 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T13:37:07,775 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732282627775"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732282627775"}]},"ts":"1732282627775"} 2024-11-22T13:37:07,775 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:07,778 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T13:37:07,779 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T13:37:07,779 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282627779"}]},"ts":"1732282627779"} 2024-11-22T13:37:07,781 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-22T13:37:07,782 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e600c4873846f232a98bdda07155394, ASSIGN}] 2024-11-22T13:37:07,783 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e600c4873846f232a98bdda07155394, ASSIGN 2024-11-22T13:37:07,784 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e600c4873846f232a98bdda07155394, ASSIGN; state=OFFLINE, location=e025332d312f,44147,1732282626611; forceNewPlan=false, retain=false 2024-11-22T13:37:07,935 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8e600c4873846f232a98bdda07155394, regionState=OPENING, regionLocation=e025332d312f,44147,1732282626611 2024-11-22T13:37:07,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e600c4873846f232a98bdda07155394, ASSIGN because future has completed 2024-11-22T13:37:07,939 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e600c4873846f232a98bdda07155394, server=e025332d312f,44147,1732282626611}] 2024-11-22T13:37:08,101 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 
2024-11-22T13:37:08,101 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8e600c4873846f232a98bdda07155394, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:37:08,102 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,102 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:37:08,103 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,103 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,105 INFO [StoreOpener-8e600c4873846f232a98bdda07155394-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,107 INFO [StoreOpener-8e600c4873846f232a98bdda07155394-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e600c4873846f232a98bdda07155394 columnFamilyName info 2024-11-22T13:37:08,107 DEBUG [StoreOpener-8e600c4873846f232a98bdda07155394-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:37:08,107 INFO [StoreOpener-8e600c4873846f232a98bdda07155394-1 {}] regionserver.HStore(327): Store=8e600c4873846f232a98bdda07155394/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:37:08,107 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,109 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,109 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,110 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,110 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,112 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,115 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:37:08,116 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8e600c4873846f232a98bdda07155394; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=878614, jitterRate=0.11721612513065338}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T13:37:08,116 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:08,117 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8e600c4873846f232a98bdda07155394: Running coprocessor pre-open hook at 1732282628103Writing region info on filesystem at 1732282628103Initializing all the Stores at 1732282628104 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282628104Cleaning up temporary data from old regions at 1732282628110 (+6 ms)Running coprocessor post-open hooks at 1732282628116 (+6 ms)Region opened successfully at 1732282628116 2024-11-22T13:37:08,118 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394., pid=6, masterSystemTime=1732282628093 2024-11-22T13:37:08,120 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:08,120 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:08,121 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8e600c4873846f232a98bdda07155394, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,44147,1732282626611 2024-11-22T13:37:08,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e600c4873846f232a98bdda07155394, server=e025332d312f,44147,1732282626611 because future has completed 2024-11-22T13:37:08,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T13:37:08,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8e600c4873846f232a98bdda07155394, server=e025332d312f,44147,1732282626611 in 186 msec 2024-11-22T13:37:08,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T13:37:08,131 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=8e600c4873846f232a98bdda07155394, ASSIGN in 346 msec 2024-11-22T13:37:08,133 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T13:37:08,133 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282628133"}]},"ts":"1732282628133"} 2024-11-22T13:37:08,135 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-22T13:37:08,137 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T13:37:08,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 384 msec 2024-11-22T13:37:08,772 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:08,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:09,773 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:09,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:10,774 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:10,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:11,776 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:11,779 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:12,608 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,631 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,634 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:12,777 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:12,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:13,142 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T13:37:13,145 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,146 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,173 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:37:13,180 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T13:37:13,181 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-22T13:37:13,778 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:13,781 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:14,780 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
2024-11-22T13:37:16,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-22T13:37:16,898 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-22T13:37:16,900 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-22T13:37:16,900 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-22T13:37:16,901 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T13:37:16,901 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-22T13:37:17,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-22T13:37:17,850 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T13:37:17,850 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-11-22T13:37:17,859 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T13:37:17,859 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.
2024-11-22T13:37:17,864 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394., hostname=e025332d312f,44147,1732282626611, seqNum=2]
2024-11-22T13:37:17,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T13:37:17,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T13:37:17,879 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T13:37:17,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-22T13:37:17,880 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T13:37:17,881 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T13:37:18,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44147 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-22T13:37:18,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.
2024-11-22T13:37:18,054 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 8e600c4873846f232a98bdda07155394 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T13:37:18,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/6fdf79383e3d4f36a774dcda3c6d10bc is 1080, key is row0001/info:/1732282637865/Put/seqid=0
2024-11-22T13:37:18,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741837_1013 (size=6033)
2024-11-22T13:37:18,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741837_1013 (size=6033)
2024-11-22T13:37:18,075 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/6fdf79383e3d4f36a774dcda3c6d10bc
2024-11-22T13:37:18,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/6fdf79383e3d4f36a774dcda3c6d10bc as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/6fdf79383e3d4f36a774dcda3c6d10bc
2024-11-22T13:37:18,086 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/6fdf79383e3d4f36a774dcda3c6d10bc, entries=1, sequenceid=5, filesize=5.9 K
2024-11-22T13:37:18,087 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e600c4873846f232a98bdda07155394 in 33ms, sequenceid=5, compaction requested=false
2024-11-22T13:37:18,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 8e600c4873846f232a98bdda07155394:
2024-11-22T13:37:18,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.
2024-11-22T13:37:18,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-22T13:37:18,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-22T13:37:18,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-22T13:37:18,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 211 msec
2024-11-22T13:37:18,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 220 msec
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:27,797 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T13:37:27,798 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-22T13:37:27,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 after 68079ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:37:27,799 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta after 68068ms
java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-22T13:37:27,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-22T13:37:27,910 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T13:37:27,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T13:37:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T13:37:27,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-22T13:37:27,923 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T13:37:27,925 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T13:37:27,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T13:37:28,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44147 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-22T13:37:28,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.
2024-11-22T13:37:28,082 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 8e600c4873846f232a98bdda07155394 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T13:37:28,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/570ea747ee0a4633ad2408a3803269de is 1080, key is row0002/info:/1732282647913/Put/seqid=0
2024-11-22T13:37:28,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741838_1014 (size=6033)
2024-11-22T13:37:28,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741838_1014 (size=6033)
2024-11-22T13:37:28,101 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/570ea747ee0a4633ad2408a3803269de
2024-11-22T13:37:28,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/570ea747ee0a4633ad2408a3803269de as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/570ea747ee0a4633ad2408a3803269de
2024-11-22T13:37:28,115 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/570ea747ee0a4633ad2408a3803269de, entries=1, sequenceid=9, filesize=5.9 K
2024-11-22T13:37:28,116 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e600c4873846f232a98bdda07155394 in 35ms, sequenceid=9, compaction requested=false
2024-11-22T13:37:28,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 8e600c4873846f232a98bdda07155394:
2024-11-22T13:37:28,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.
2024-11-22T13:37:28,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-22T13:37:28,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-22T13:37:28,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-22T13:37:28,121 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 194 msec
2024-11-22T13:37:28,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 204 msec
2024-11-22T13:37:28,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	...
11 more 2024-11-22T13:37:28,800 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:29,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:29,801 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:30,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:30,802 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:31,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:31,804 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:32,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:32,805 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:33,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:33,807 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:34,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:34,808 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:35,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:35,810 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:36,323 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T13:37:36,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:36,811 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:37,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:37,813 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:37,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-22T13:37:37,970 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T13:37:37,979 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C44147%2C1732282626611.1732282657978 2024-11-22T13:37:37,984 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:37,984 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:37,985 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:37,985 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:37,985 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:37,985 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282627232 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282657978 2024-11-22T13:37:37,986 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43847:43847),(127.0.0.1/127.0.0.1:41081:41081)] 2024-11-22T13:37:37,986 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282627232 is not closed yet, will try archiving it next time 2024-11-22T13:37:37,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741833_1009 (size=5546) 2024-11-22T13:37:37,987 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T13:37:37,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741833_1009 (size=5546) 2024-11-22T13:37:37,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T13:37:37,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-22T13:37:37,989 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-22T13:37:37,990 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-22T13:37:37,990 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T13:37:38,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44147 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-22T13:37:38,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:38,146 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 8e600c4873846f232a98bdda07155394 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T13:37:38,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/b3ffc6e7fe6248fa9d610d7cc51bc438 is 1080, key is row0003/info:/1732282657974/Put/seqid=0 2024-11-22T13:37:38,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741840_1016 (size=6033) 2024-11-22T13:37:38,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741840_1016 (size=6033) 2024-11-22T13:37:38,159 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/b3ffc6e7fe6248fa9d610d7cc51bc438 2024-11-22T13:37:38,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/b3ffc6e7fe6248fa9d610d7cc51bc438 as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/b3ffc6e7fe6248fa9d610d7cc51bc438 2024-11-22T13:37:38,172 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/b3ffc6e7fe6248fa9d610d7cc51bc438, entries=1, sequenceid=13, filesize=5.9 K 2024-11-22T13:37:38,173 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
8e600c4873846f232a98bdda07155394 in 27ms, sequenceid=13, compaction requested=true 2024-11-22T13:37:38,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 8e600c4873846f232a98bdda07155394: 2024-11-22T13:37:38,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:38,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-22T13:37:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-22T13:37:38,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-22T13:37:38,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-11-22T13:37:38,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec 2024-11-22T13:37:38,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:38,815 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:39,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:39,816 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:40,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:40,818 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:41,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:41,820 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:42,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:42,821 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:43,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:43,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:44,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:44,824 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:45,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:45,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:46,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:46,827 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:47,692 INFO [master/e025332d312f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T13:37:47,692 INFO [master/e025332d312f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T13:37:47,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:47,828 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T13:37:48,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-22T13:37:48,040 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T13:37:48,040 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T13:37:48,044 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T13:37:48,044 DEBUG [Time-limited test {}] regionserver.HStore(1541): 8e600c4873846f232a98bdda07155394/info is initiating minor compaction (all files)
2024-11-22T13:37:48,045 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-22T13:37:48,045 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-22T13:37:48,045 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 8e600c4873846f232a98bdda07155394/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.
2024-11-22T13:37:48,045 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/6fdf79383e3d4f36a774dcda3c6d10bc, hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/570ea747ee0a4633ad2408a3803269de, hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/b3ffc6e7fe6248fa9d610d7cc51bc438] into tmpdir=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp, totalSize=17.7 K
2024-11-22T13:37:48,047 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 6fdf79383e3d4f36a774dcda3c6d10bc, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732282637865
2024-11-22T13:37:48,049 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 570ea747ee0a4633ad2408a3803269de, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732282647913
2024-11-22T13:37:48,050 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting b3ffc6e7fe6248fa9d610d7cc51bc438, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732282657974
2024-11-22T13:37:48,064 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 8e600c4873846f232a98bdda07155394#info#compaction#45 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T13:37:48,064 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/a21a3b93e81f48128e091fb5e540602a is 1080, key is row0001/info:/1732282637865/Put/seqid=0
2024-11-22T13:37:48,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741841_1017 (size=8296)
2024-11-22T13:37:48,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741841_1017 (size=8296)
2024-11-22T13:37:48,075 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/a21a3b93e81f48128e091fb5e540602a as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/a21a3b93e81f48128e091fb5e540602a
2024-11-22T13:37:48,082 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8e600c4873846f232a98bdda07155394/info of 8e600c4873846f232a98bdda07155394 into a21a3b93e81f48128e091fb5e540602a(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T13:37:48,082 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 8e600c4873846f232a98bdda07155394:
2024-11-22T13:37:48,085 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C44147%2C1732282626611.1732282668084
2024-11-22T13:37:48,090 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:37:48,090 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:37:48,091 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:37:48,091 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:37:48,091 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T13:37:48,091 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282657978 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282668084
2024-11-22T13:37:48,092 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41081:41081),(127.0.0.1/127.0.0.1:43847:43847)]
2024-11-22T13:37:48,092 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282657978 is not closed yet, will try archiving it next time
2024-11-22T13:37:48,092 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282627232 to hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/oldWALs/e025332d312f%2C44147%2C1732282626611.1732282627232
2024-11-22T13:37:48,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741839_1015 (size=2520)
2024-11-22T13:37:48,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741839_1015 (size=2520)
2024-11-22T13:37:48,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T13:37:48,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T13:37:48,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-22T13:37:48,095 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T13:37:48,096 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T13:37:48,096 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T13:37:48,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44147 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-22T13:37:48,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.
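The records above show a minor compaction of 8e600c4873846f232a98bdda07155394/info (three 5.9 K HFiles merged into one 8.1 K file), a WAL roll, and the master scheduling FlushTableProcedure pid=13 with subprocedure pid=14 after a flush request from client jenkins//172.17.0.2; the records below show the region server executing that flush. For reference, a minimal sketch of how such a flush and compaction can be requested through the public HBase Admin API; this is illustrative only, assumes a standard hbase-client setup, and is not the test's own helper code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: the client-side way to request the operations
    // recorded above; the test's own helper utilities are not shown in this log.
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Triggers a flush like FlushTableProcedure pid=13/14 above: the
      // region's memstore is written out as a new HFile under the info store.
      admin.flush(table);
      // Requests a compaction; eligible store files are then selected
      // (ExploringCompactionPolicy above) and merged into a single HFile.
      admin.compact(table);
    }
  }
}

Admin.compact returns once the request is queued; it does not wait for the merge to finish, which is why the log records the selection and the "Completed compaction" message separately.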
2024-11-22T13:37:48,248 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 8e600c4873846f232a98bdda07155394 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T13:37:48,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/7c5bb88a17924e199683efbe4c8433af is 1080, key is row0000/info:/1732282668083/Put/seqid=0
2024-11-22T13:37:48,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741843_1019 (size=6033)
2024-11-22T13:37:48,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741843_1019 (size=6033)
2024-11-22T13:37:48,265 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/7c5bb88a17924e199683efbe4c8433af
2024-11-22T13:37:48,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/7c5bb88a17924e199683efbe4c8433af as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/7c5bb88a17924e199683efbe4c8433af
2024-11-22T13:37:48,277 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/7c5bb88a17924e199683efbe4c8433af, entries=1, sequenceid=18, filesize=5.9 K
2024-11-22T13:37:48,278 INFO [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e600c4873846f232a98bdda07155394 in 30ms, sequenceid=18, compaction requested=false
2024-11-22T13:37:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 8e600c4873846f232a98bdda07155394:
2024-11-22T13:37:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.
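The flush above completes in roughly 30 ms (sequenceid=18, one 5.9 K HFile added, no further compaction requested). The WARN entries that dominate the rest of this excerpt are unrelated to that work: roughly once per second, Close-WAL-Writer-0 retries lease recovery for two WAL files on hdfs://localhost:46035, a different mini-DFS instance from the one this test writes to (hdfs://localhost:45327), and every attempt fails because that filesystem's DFSClient has already been closed. RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed through reflection (hence the GeneratedMethodAccessor frames), so the real error, java.io.IOException: Filesystem closed, only appears as the cause of an InvocationTargetException whose own message is null. Below is a minimal illustrative sketch of that wrapping, using only the standard Hadoop HDFS client API with a placeholder URI and path; it is not the RecoverLeaseFSUtils source.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class IsFileClosedAfterClose {
  public static void main(String[] args) throws Exception {
    // Placeholder namenode URI and WAL path, for illustration only.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46035"), conf);
    Path wal = new Path("/user/jenkins/some-wal-file");

    fs.close(); // after this, DFSClient.checkOpen() throws IOException("Filesystem closed")

    // isFileClosed is looked up reflectively, mirroring the
    // GeneratedMethodAccessor frames in the stack traces above.
    Method isFileClosed = DistributedFileSystem.class.getMethod("isFileClosed", Path.class);
    try {
      boolean closed = (boolean) isFileClosed.invoke(fs, wal);
      System.out.println("file closed? " + closed);
    } catch (InvocationTargetException e) {
      // The wrapper itself carries no message ("InvocationTargetException: null");
      // the useful information is the nested cause.
      System.out.println("cause: " + e.getCause()); // java.io.IOException: Filesystem closed
    }
  }
}

Unwrapping getCause() is what yields the "Filesystem closed" message printed under each retry that follows.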
2024-11-22T13:37:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-22T13:37:48,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-22T13:37:48,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-22T13:37:48,283 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 184 msec 2024-11-22T13:37:48,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-11-22T13:37:48,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:48,829 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:49,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:49,831 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:50,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:50,832 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:51,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:51,834 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:52,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:52,835 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:53,102 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8e600c4873846f232a98bdda07155394, had cached 0 bytes from a total of 14329 2024-11-22T13:37:53,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:53,837 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:54,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:54,838 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:55,839 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:55,840 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:56,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:56,841 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:57,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:57,843 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:37:58,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37969 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-22T13:37:58,160 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T13:37:58,167 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C44147%2C1732282626611.1732282678167 2024-11-22T13:37:58,175 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,176 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,176 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,176 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,176 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,176 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282668084 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282678167 2024-11-22T13:37:58,177 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41081:41081),(127.0.0.1/127.0.0.1:43847:43847)] 2024-11-22T13:37:58,177 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282668084 is not closed yet, will try archiving it next time 2024-11-22T13:37:58,177 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/WALs/e025332d312f,44147,1732282626611/e025332d312f%2C44147%2C1732282626611.1732282657978 to hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/oldWALs/e025332d312f%2C44147%2C1732282626611.1732282657978 2024-11-22T13:37:58,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T13:37:58,178 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
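The repeated WARN entries above all follow the same pattern: while closing the old WAL writers, the lease-recovery helper keeps probing whether the WAL file is already closed by invoking isFileClosed reflectively, and because the mini cluster's DFS client has already been shut down, each probe throws IOException("Filesystem closed"), which reflection surfaces as the InvocationTargetException seen in the traces. A minimal sketch of such a reflective probe follows; the class name and error handling are illustrative assumptions, not the actual RecoverLeaseFSUtils code.

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch: probe FileSystem#isFileClosed via reflection, the way
    // the "Failed invocation" warnings above suggest the lease-recovery helper
    // does. A closed DFSClient throws IOException("Filesystem closed"), which
    // Method.invoke wraps in an InvocationTargetException.
    final class IsFileClosedProbe {
      static boolean isFileClosed(FileSystem fs, Path wal) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, wal);
        } catch (InvocationTargetException e) {
          // The target method itself threw; treat the file as not yet closed.
          return false;
        } catch (ReflectiveOperationException e) {
          // isFileClosed is not exposed by this FileSystem implementation.
          return false;
        }
      }
    }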
2024-11-22T13:37:58,178 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:37:58,178 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:37:58,178 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:37:58,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741842_1018 (size=2026) 2024-11-22T13:37:58,178 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): 
Shutting down HBase Cluster 2024-11-22T13:37:58,178 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=441201650, stopped=false 2024-11-22T13:37:58,178 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e025332d312f,37969,1732282626343 2024-11-22T13:37:58,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741842_1018 (size=2026) 2024-11-22T13:37:58,180 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T13:37:58,210 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:37:58,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:37:58,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:37:58,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:58,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:58,210 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
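The call stack above enters this shutdown through AbstractTestLogRolling.tearDown, which delegates to HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that tearDown shape, assuming a test class that owns its own HBaseTestingUtil instance (the class and field names here are placeholders, not the actual test source):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    // Illustrative sketch of the JUnit tearDown path visible in the call stacks:
    // tearDown -> HBaseTestingUtil.shutdownMiniCluster, which stops the mini
    // HBase cluster and then the mini DFS, producing the region close and WAL
    // archival sequence logged below.
    public class LogRollingTearDownSketch {
      private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        testUtil.shutdownMiniCluster();
      }
    }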
2024-11-22T13:37:58,210 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:37:58,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:37:58,211 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e025332d312f,44147,1732282626611' ***** 2024-11-22T13:37:58,211 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T13:37:58,211 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:37:58,211 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:37:58,212 INFO [RS:0;e025332d312f:44147 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T13:37:58,212 INFO [RS:0;e025332d312f:44147 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T13:37:58,212 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T13:37:58,212 INFO [RS:0;e025332d312f:44147 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T13:37:58,212 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(3091): Received CLOSE for 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:58,212 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(959): stopping server e025332d312f,44147,1732282626611 2024-11-22T13:37:58,212 INFO [RS:0;e025332d312f:44147 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:37:58,213 INFO [RS:0;e025332d312f:44147 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e025332d312f:44147. 2024-11-22T13:37:58,213 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8e600c4873846f232a98bdda07155394, disabling compactions & flushes 2024-11-22T13:37:58,213 DEBUG [RS:0;e025332d312f:44147 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:37:58,213 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:58,213 DEBUG [RS:0;e025332d312f:44147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:37:58,213 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:58,213 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. after waiting 0 ms 2024-11-22T13:37:58,213 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:58,213 INFO [RS:0;e025332d312f:44147 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T13:37:58,213 INFO [RS:0;e025332d312f:44147 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T13:37:58,213 INFO [RS:0;e025332d312f:44147 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T13:37:58,213 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8e600c4873846f232a98bdda07155394 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T13:37:58,213 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T13:37:58,214 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T13:37:58,214 DEBUG [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8e600c4873846f232a98bdda07155394=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.} 2024-11-22T13:37:58,214 DEBUG [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8e600c4873846f232a98bdda07155394 2024-11-22T13:37:58,214 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:37:58,214 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:37:58,214 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:37:58,214 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:37:58,214 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:37:58,215 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-22T13:37:58,219 DEBUG 
[RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/382ea67faf044dc0820b748b05220ad9 is 1080, key is row0001/info:/1732282678163/Put/seqid=0 2024-11-22T13:37:58,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741845_1021 (size=6033) 2024-11-22T13:37:58,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741845_1021 (size=6033) 2024-11-22T13:37:58,225 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/382ea67faf044dc0820b748b05220ad9 2024-11-22T13:37:58,232 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/.tmp/info/382ea67faf044dc0820b748b05220ad9 as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/382ea67faf044dc0820b748b05220ad9 2024-11-22T13:37:58,235 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/info/51d79c8839af4cc382b32b34149b32bf is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394./info:regioninfo/1732282628121/Put/seqid=0 2024-11-22T13:37:58,238 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/382ea67faf044dc0820b748b05220ad9, entries=1, sequenceid=22, filesize=5.9 K 2024-11-22T13:37:58,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741846_1022 (size=7308) 2024-11-22T13:37:58,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741846_1022 (size=7308) 2024-11-22T13:37:58,239 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e600c4873846f232a98bdda07155394 in 26ms, sequenceid=22, compaction requested=true 2024-11-22T13:37:58,239 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at 
sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/info/51d79c8839af4cc382b32b34149b32bf 2024-11-22T13:37:58,240 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/6fdf79383e3d4f36a774dcda3c6d10bc, hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/570ea747ee0a4633ad2408a3803269de, hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/b3ffc6e7fe6248fa9d610d7cc51bc438] to archive 2024-11-22T13:37:58,240 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T13:37:58,242 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/6fdf79383e3d4f36a774dcda3c6d10bc to hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/6fdf79383e3d4f36a774dcda3c6d10bc 2024-11-22T13:37:58,243 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/570ea747ee0a4633ad2408a3803269de to hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/570ea747ee0a4633ad2408a3803269de 2024-11-22T13:37:58,244 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/b3ffc6e7fe6248fa9d610d7cc51bc438 to hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/info/b3ffc6e7fe6248fa9d610d7cc51bc438 2024-11-22T13:37:58,244 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e025332d312f:37969 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T13:37:58,245 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [6fdf79383e3d4f36a774dcda3c6d10bc=6033, 570ea747ee0a4633ad2408a3803269de=6033, b3ffc6e7fe6248fa9d610d7cc51bc438=6033] 2024-11-22T13:37:58,248 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/8e600c4873846f232a98bdda07155394/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-22T13:37:58,249 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 2024-11-22T13:37:58,249 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8e600c4873846f232a98bdda07155394: Waiting for close lock at 1732282678213Running coprocessor pre-close hooks at 1732282678213Disabling compacts and flushes for region at 1732282678213Disabling writes for close at 1732282678213Obtaining lock to block concurrent updates at 1732282678213Preparing flush snapshotting stores in 8e600c4873846f232a98bdda07155394 at 1732282678213Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732282678214 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. at 1732282678215 (+1 ms)Flushing 8e600c4873846f232a98bdda07155394/info: creating writer at 1732282678215Flushing 8e600c4873846f232a98bdda07155394/info: appending metadata at 1732282678219 (+4 ms)Flushing 8e600c4873846f232a98bdda07155394/info: closing flushed file at 1732282678219Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4fffb525: reopening flushed file at 1732282678231 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 8e600c4873846f232a98bdda07155394 in 26ms, sequenceid=22, compaction requested=true at 1732282678239 (+8 ms)Writing region close event to WAL at 1732282678245 (+6 ms)Running coprocessor post-close hooks at 1732282678248 (+3 ms)Closed at 1732282678248 2024-11-22T13:37:58,249 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732282627753.8e600c4873846f232a98bdda07155394. 
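The "Committing ... as ..." DEBUG lines above record a freshly flushed HFile being promoted from the region's .tmp directory into its store directory before it is added to the store. The fragment below sketches that commit step as a plain HDFS rename with placeholder paths; the actual HRegionFileSystem logic performs additional validation around the same move.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch: promote a flushed file from the region's .tmp area
    // into the column-family store directory. Paths are placeholders.
    public class CommitFlushedFileSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/data/default/T/region/.tmp/info/flushedfile");
        Path dst = new Path("/data/default/T/region/info/flushedfile");
        if (!fs.rename(tmp, dst)) {  // the rename is the commit; cheap on HDFS
          throw new IOException("Failed to commit flushed file to " + dst);
        }
      }
    }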
2024-11-22T13:37:58,258 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/ns/1e76128fdd89413784f52ed518caf224 is 43, key is default/ns:d/1732282627677/Put/seqid=0 2024-11-22T13:37:58,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741847_1023 (size=5153) 2024-11-22T13:37:58,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741847_1023 (size=5153) 2024-11-22T13:37:58,263 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/ns/1e76128fdd89413784f52ed518caf224 2024-11-22T13:37:58,281 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/table/8d7e3ba39584492ba82cd324a25e61bb is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732282628133/Put/seqid=0 2024-11-22T13:37:58,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741848_1024 (size=5508) 2024-11-22T13:37:58,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741848_1024 (size=5508) 2024-11-22T13:37:58,286 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/table/8d7e3ba39584492ba82cd324a25e61bb 2024-11-22T13:37:58,291 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/info/51d79c8839af4cc382b32b34149b32bf as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/info/51d79c8839af4cc382b32b34149b32bf 2024-11-22T13:37:58,295 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/info/51d79c8839af4cc382b32b34149b32bf, entries=10, sequenceid=11, filesize=7.1 K 2024-11-22T13:37:58,296 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/ns/1e76128fdd89413784f52ed518caf224 as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/ns/1e76128fdd89413784f52ed518caf224 2024-11-22T13:37:58,301 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/ns/1e76128fdd89413784f52ed518caf224, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T13:37:58,302 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/.tmp/table/8d7e3ba39584492ba82cd324a25e61bb as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/table/8d7e3ba39584492ba82cd324a25e61bb 2024-11-22T13:37:58,307 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/table/8d7e3ba39584492ba82cd324a25e61bb, entries=2, sequenceid=11, filesize=5.4 K 2024-11-22T13:37:58,308 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false 2024-11-22T13:37:58,313 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T13:37:58,313 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:37:58,313 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:37:58,313 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282678214Running coprocessor pre-close hooks at 1732282678214Disabling compacts and flushes for region at 1732282678214Disabling writes for close at 1732282678214Obtaining lock to block concurrent updates at 1732282678215 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732282678215Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732282678215Flushing stores of hbase:meta,,1.1588230740 at 1732282678217 (+2 ms)Flushing 1588230740/info: creating writer at 1732282678217Flushing 1588230740/info: appending metadata at 1732282678234 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732282678234Flushing 1588230740/ns: creating writer at 1732282678244 (+10 ms)Flushing 1588230740/ns: appending metadata at 1732282678258 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732282678258Flushing 1588230740/table: creating writer at 1732282678267 (+9 ms)Flushing 1588230740/table: appending metadata at 1732282678281 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732282678281Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@11ef5e1f: reopening flushed file at 1732282678290 (+9 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d73c104: reopening flushed file at 1732282678295 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3268d1e0: reopening flushed file at 1732282678301 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 94ms, sequenceid=11, compaction requested=false at 1732282678308 (+7 ms)Writing region close event to WAL at 1732282678309 (+1 ms)Running coprocessor post-close hooks at 1732282678313 (+4 ms)Closed at 1732282678313 2024-11-22T13:37:58,313 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T13:37:58,414 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(976): stopping server e025332d312f,44147,1732282626611; all regions closed. 2024-11-22T13:37:58,415 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,415 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,415 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,415 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,416 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741834_1010 (size=3306) 2024-11-22T13:37:58,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741834_1010 (size=3306) 2024-11-22T13:37:58,423 DEBUG [RS:0;e025332d312f:44147 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/oldWALs 2024-11-22T13:37:58,424 INFO [RS:0;e025332d312f:44147 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C44147%2C1732282626611.meta:.meta(num 1732282627595) 2024-11-22T13:37:58,424 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,424 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,424 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,424 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,425 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741844_1020 (size=1252) 2024-11-22T13:37:58,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741844_1020 (size=1252) 2024-11-22T13:37:58,429 DEBUG [RS:0;e025332d312f:44147 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/oldWALs 2024-11-22T13:37:58,429 INFO [RS:0;e025332d312f:44147 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C44147%2C1732282626611:(num 1732282678167) 2024-11-22T13:37:58,429 DEBUG [RS:0;e025332d312f:44147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:37:58,429 INFO [RS:0;e025332d312f:44147 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:37:58,429 INFO [RS:0;e025332d312f:44147 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:37:58,429 INFO [RS:0;e025332d312f:44147 {}] hbase.ChoreService(370): Chore service for: regionserver/e025332d312f:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T13:37:58,429 INFO [RS:0;e025332d312f:44147 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:37:58,429 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T13:37:58,430 INFO [RS:0;e025332d312f:44147 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44147 2024-11-22T13:37:58,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e025332d312f,44147,1732282626611 2024-11-22T13:37:58,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:37:58,442 INFO [RS:0;e025332d312f:44147 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:37:58,452 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e025332d312f,44147,1732282626611] 2024-11-22T13:37:58,463 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e025332d312f,44147,1732282626611 already deleted, retry=false 2024-11-22T13:37:58,463 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e025332d312f,44147,1732282626611 expired; onlineServers=0 2024-11-22T13:37:58,463 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e025332d312f,37969,1732282626343' ***** 2024-11-22T13:37:58,463 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T13:37:58,463 INFO [M:0;e025332d312f:37969 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:37:58,463 INFO [M:0;e025332d312f:37969 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:37:58,463 DEBUG [M:0;e025332d312f:37969 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T13:37:58,464 DEBUG [M:0;e025332d312f:37969 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T13:37:58,464 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
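As the region server stops above, it shuts down its chore service, executor service, and RPC server in turn. The snippet below sketches the generic graceful-then-forced executor shutdown pattern that the "Shutdown executor service" lines refer to; the 30-second timeout is an arbitrary placeholder, not an HBase setting.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.TimeUnit;

    // Illustrative sketch of a graceful executor shutdown: stop accepting new
    // tasks, wait briefly for in-flight work, then force-cancel what remains.
    final class ExecutorShutdownSketch {
      static void shutdownGracefully(ExecutorService pool) {
        pool.shutdown();
        try {
          if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            pool.shutdownNow();
          }
        } catch (InterruptedException e) {
          pool.shutdownNow();
          Thread.currentThread().interrupt();  // preserve the interrupt status
        }
      }
    }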
2024-11-22T13:37:58,464 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282626955 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282626955,5,FailOnTimeoutGroup] 2024-11-22T13:37:58,464 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282626954 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282626954,5,FailOnTimeoutGroup] 2024-11-22T13:37:58,464 INFO [M:0;e025332d312f:37969 {}] hbase.ChoreService(370): Chore service for: master/e025332d312f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T13:37:58,464 INFO [M:0;e025332d312f:37969 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:37:58,464 DEBUG [M:0;e025332d312f:37969 {}] master.HMaster(1795): Stopping service threads 2024-11-22T13:37:58,464 INFO [M:0;e025332d312f:37969 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T13:37:58,465 INFO [M:0;e025332d312f:37969 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:37:58,465 INFO [M:0;e025332d312f:37969 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T13:37:58,465 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T13:37:58,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T13:37:58,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:37:58,474 DEBUG [M:0;e025332d312f:37969 {}] zookeeper.ZKUtil(347): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T13:37:58,474 WARN [M:0;e025332d312f:37969 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T13:37:58,475 INFO [M:0;e025332d312f:37969 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/.lastflushedseqids 2024-11-22T13:37:58,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741849_1025 (size=130) 2024-11-22T13:37:58,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741849_1025 (size=130) 2024-11-22T13:37:58,482 INFO [M:0;e025332d312f:37969 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T13:37:58,482 INFO [M:0;e025332d312f:37969 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T13:37:58,482 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:37:58,482 INFO [M:0;e025332d312f:37969 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:37:58,482 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:37:58,482 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:37:58,482 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:37:58,482 INFO [M:0;e025332d312f:37969 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-22T13:37:58,501 DEBUG [M:0;e025332d312f:37969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d640719cc3024a938f61bc158ad1aefa is 82, key is hbase:meta,,1/info:regioninfo/1732282627623/Put/seqid=0 2024-11-22T13:37:58,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741850_1026 (size=5672) 2024-11-22T13:37:58,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741850_1026 (size=5672) 2024-11-22T13:37:58,506 INFO [M:0;e025332d312f:37969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d640719cc3024a938f61bc158ad1aefa 2024-11-22T13:37:58,525 DEBUG [M:0;e025332d312f:37969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72a420ff0fa64dd2a2c69dde14aaa352 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732282628139/Put/seqid=0 2024-11-22T13:37:58,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741851_1027 (size=7824) 2024-11-22T13:37:58,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741851_1027 (size=7824) 2024-11-22T13:37:58,530 INFO [M:0;e025332d312f:37969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72a420ff0fa64dd2a2c69dde14aaa352 2024-11-22T13:37:58,534 INFO [M:0;e025332d312f:37969 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 72a420ff0fa64dd2a2c69dde14aaa352 2024-11-22T13:37:58,549 DEBUG [M:0;e025332d312f:37969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4139c4785aaf4f548a23c21760f9a40e is 69, key is e025332d312f,44147,1732282626611/rs:state/1732282627077/Put/seqid=0 
2024-11-22T13:37:58,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:37:58,553 INFO [RS:0;e025332d312f:44147 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:37:58,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44147-0x10162c32cc00001, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:37:58,553 INFO [RS:0;e025332d312f:44147 {}] regionserver.HRegionServer(1031): Exiting; stopping=e025332d312f,44147,1732282626611; zookeeper connection closed. 2024-11-22T13:37:58,553 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@22190674 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@22190674 2024-11-22T13:37:58,553 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T13:37:58,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741852_1028 (size=5156) 2024-11-22T13:37:58,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741852_1028 (size=5156) 2024-11-22T13:37:58,554 INFO [M:0;e025332d312f:37969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4139c4785aaf4f548a23c21760f9a40e 2024-11-22T13:37:58,578 DEBUG [M:0;e025332d312f:37969 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ca5d525c195e45ba85bb2347ac92ce67 is 52, key is load_balancer_on/state:d/1732282627748/Put/seqid=0 2024-11-22T13:37:58,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741853_1029 (size=5056) 2024-11-22T13:37:58,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741853_1029 (size=5056) 2024-11-22T13:37:58,583 INFO [M:0;e025332d312f:37969 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ca5d525c195e45ba85bb2347ac92ce67 2024-11-22T13:37:58,588 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d640719cc3024a938f61bc158ad1aefa as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d640719cc3024a938f61bc158ad1aefa 2024-11-22T13:37:58,594 INFO [M:0;e025332d312f:37969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d640719cc3024a938f61bc158ad1aefa, entries=8, sequenceid=121, filesize=5.5 K 2024-11-22T13:37:58,594 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/72a420ff0fa64dd2a2c69dde14aaa352 as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/72a420ff0fa64dd2a2c69dde14aaa352 2024-11-22T13:37:58,599 INFO [M:0;e025332d312f:37969 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 72a420ff0fa64dd2a2c69dde14aaa352 2024-11-22T13:37:58,599 INFO [M:0;e025332d312f:37969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/72a420ff0fa64dd2a2c69dde14aaa352, entries=14, sequenceid=121, filesize=7.6 K 2024-11-22T13:37:58,600 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/4139c4785aaf4f548a23c21760f9a40e as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4139c4785aaf4f548a23c21760f9a40e 2024-11-22T13:37:58,605 INFO [M:0;e025332d312f:37969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/4139c4785aaf4f548a23c21760f9a40e, entries=1, sequenceid=121, filesize=5.0 K 2024-11-22T13:37:58,606 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/ca5d525c195e45ba85bb2347ac92ce67 as hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ca5d525c195e45ba85bb2347ac92ce67 2024-11-22T13:37:58,610 INFO [M:0;e025332d312f:37969 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45327/user/jenkins/test-data/c3287252-4eec-e4c6-af9a-51ef2b1b71f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/ca5d525c195e45ba85bb2347ac92ce67, entries=1, sequenceid=121, filesize=4.9 K 2024-11-22T13:37:58,612 INFO [M:0;e025332d312f:37969 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44647, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=121, compaction requested=false 2024-11-22T13:37:58,614 INFO [M:0;e025332d312f:37969 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T13:37:58,614 DEBUG [M:0;e025332d312f:37969 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282678482Disabling compacts and flushes for region at 1732282678482Disabling writes for close at 1732282678482Obtaining lock to block concurrent updates at 1732282678482Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732282678482Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44647, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1732282678483 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732282678483Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732282678483Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732282678500 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732282678500Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732282678510 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732282678525 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732282678525Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732282678534 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732282678548 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732282678548Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732282678559 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732282678577 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732282678577Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47b05f09: reopening flushed file at 1732282678587 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a04ffab: reopening flushed file at 1732282678594 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4639d970: reopening flushed file at 1732282678599 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@735da2c6: reopening flushed file at 1732282678605 (+6 ms)Finished flush of dataSize ~43.60 KB/44647, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 129ms, sequenceid=121, compaction requested=false at 1732282678612 (+7 ms)Writing region close event to WAL at 1732282678614 (+2 ms)Closed at 1732282678614 2024-11-22T13:37:58,616 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,616 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,616 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,616 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,616 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:37:58,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44673 is added to blk_1073741830_1006 (size=53044) 2024-11-22T13:37:58,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36567 is added to blk_1073741830_1006 (size=53044) 2024-11-22T13:37:58,619 INFO [M:0;e025332d312f:37969 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-22T13:37:58,619 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T13:37:58,619 INFO [M:0;e025332d312f:37969 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37969 2024-11-22T13:37:58,619 INFO [M:0;e025332d312f:37969 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:37:58,731 INFO [M:0;e025332d312f:37969 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:37:58,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:37:58,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37969-0x10162c32cc00000, quorum=127.0.0.1:64936, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:37:58,735 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4fb491f6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:37:58,736 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@16217bcd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:37:58,736 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:37:58,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2972d60a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:37:58,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bc5f936{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.log.dir/,STOPPED} 2024-11-22T13:37:58,741 WARN [BP-844324917-172.17.0.2-1732282623906 heartbeating to localhost/127.0.0.1:45327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:37:58,741 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:37:58,741 WARN [BP-844324917-172.17.0.2-1732282623906 heartbeating to localhost/127.0.0.1:45327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-844324917-172.17.0.2-1732282623906 (Datanode Uuid 3c91a1c1-40f6-41a1-b2a0-b21ac4950544) service to localhost/127.0.0.1:45327 2024-11-22T13:37:58,741 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:37:58,742 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/data/data3/current/BP-844324917-172.17.0.2-1732282623906 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:37:58,742 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/data/data4/current/BP-844324917-172.17.0.2-1732282623906 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:37:58,743 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:37:58,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c6abbb8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:37:58,745 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@555a4a92{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:37:58,745 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:37:58,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f411ba{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:37:58,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ec76923{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.log.dir/,STOPPED} 2024-11-22T13:37:58,747 WARN [BP-844324917-172.17.0.2-1732282623906 heartbeating to localhost/127.0.0.1:45327 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:37:58,747 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:37:58,747 WARN [BP-844324917-172.17.0.2-1732282623906 heartbeating to localhost/127.0.0.1:45327 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-844324917-172.17.0.2-1732282623906 (Datanode Uuid 2ef7c1a7-3aee-451a-bb94-8d270a8f8743) service to localhost/127.0.0.1:45327 2024-11-22T13:37:58,747 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:37:58,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/data/data1/current/BP-844324917-172.17.0.2-1732282623906 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:37:58,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/cluster_0f29d4f2-07bd-67be-bbb6-adb2b7826afc/data/data2/current/BP-844324917-172.17.0.2-1732282623906 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:37:58,748 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:37:58,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@678c2527{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:37:58,754 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@191911fe{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:37:58,754 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:37:58,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18701e65{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:37:58,754 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6826318a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.log.dir/,STOPPED} 2024-11-22T13:37:58,760 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T13:37:58,780 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T13:37:58,787 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 182) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45327 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/e025332d312f:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45327 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45327 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:45327 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45327 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:45327 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=53 (was 71), ProcessCount=11 (was 11), AvailableMemoryMB=2361 (was 2399) 2024-11-22T13:37:58,793 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=53, ProcessCount=11, AvailableMemoryMB=2361 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.log.dir so I do NOT create it in target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/57bed770-bbb6-56b3-ede1-67cb65f802f9/hadoop.tmp.dir so I do NOT create it in target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539, deleteOnExit=true 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/test.cache.data in system properties and HBase conf 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.log.dir in system properties and HBase conf 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T13:37:58,794 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T13:37:58,794 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/nfs.dump.dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/java.io.tmpdir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:37:58,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T13:37:58,796 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T13:37:58,809 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:37:58,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:58,844 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:59,100 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:37:59,101 INFO [regionserver/e025332d312f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:37:59,103 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:37:59,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:37:59,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:37:59,104 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:37:59,105 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:37:59,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41a74ab6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:37:59,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d31ee43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:37:59,197 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7986f193{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/java.io.tmpdir/jetty-localhost-34303-hadoop-hdfs-3_4_1-tests_jar-_-any-4484967035859748978/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:37:59,198 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47cbf00e{HTTP/1.1, (http/1.1)}{localhost:34303} 2024-11-22T13:37:59,198 INFO [Time-limited test {}] server.Server(415): Started @248377ms 2024-11-22T13:37:59,209 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:37:59,504 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:37:59,508 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:37:59,509 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:37:59,509 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:37:59,509 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:37:59,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7181dda1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:37:59,509 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47c8059{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:37:59,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e483a61{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/java.io.tmpdir/jetty-localhost-45337-hadoop-hdfs-3_4_1-tests_jar-_-any-3556461194216393402/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:37:59,604 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ab86f9f{HTTP/1.1, (http/1.1)}{localhost:45337} 2024-11-22T13:37:59,604 INFO [Time-limited test {}] server.Server(415): Started @248784ms 2024-11-22T13:37:59,605 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:37:59,628 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:37:59,630 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:37:59,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:37:59,630 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:37:59,630 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:37:59,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6ff82d67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:37:59,631 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57390027{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:37:59,721 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62d57d83{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/java.io.tmpdir/jetty-localhost-44195-hadoop-hdfs-3_4_1-tests_jar-_-any-12176434982315506280/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:37:59,722 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7cbc9241{HTTP/1.1, (http/1.1)}{localhost:44195} 2024-11-22T13:37:59,722 INFO [Time-limited test {}] server.Server(415): Started @248901ms 2024-11-22T13:37:59,723 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:37:59,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:37:59,845 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:00,756 WARN [Thread-1968 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/data/data1/current/BP-23462902-172.17.0.2-1732282678811/current, will proceed with Du for space computation calculation, 2024-11-22T13:38:00,756 WARN [Thread-1969 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/data/data2/current/BP-23462902-172.17.0.2-1732282678811/current, will proceed with Du for space computation calculation, 2024-11-22T13:38:00,775 WARN [Thread-1932 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T13:38:00,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1793ce3b9cb757fd with lease ID 0xab0e331921cf8dc8: Processing first storage report for DS-c9470bcc-b35e-4f3d-9f83-60c44edf15f6 from datanode DatanodeRegistration(127.0.0.1:35875, datanodeUuid=58b21bbf-e607-4a00-b1f2-21128c369588, infoPort=43003, infoSecurePort=0, ipcPort=40609, storageInfo=lv=-57;cid=testClusterID;nsid=883389634;c=1732282678811) 2024-11-22T13:38:00,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1793ce3b9cb757fd with lease ID 0xab0e331921cf8dc8: from storage DS-c9470bcc-b35e-4f3d-9f83-60c44edf15f6 node DatanodeRegistration(127.0.0.1:35875, datanodeUuid=58b21bbf-e607-4a00-b1f2-21128c369588, infoPort=43003, infoSecurePort=0, ipcPort=40609, storageInfo=lv=-57;cid=testClusterID;nsid=883389634;c=1732282678811), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:38:00,777 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1793ce3b9cb757fd with lease ID 0xab0e331921cf8dc8: Processing first storage report for DS-04a699cb-0776-410a-9b35-754524eb5a94 from datanode DatanodeRegistration(127.0.0.1:35875, datanodeUuid=58b21bbf-e607-4a00-b1f2-21128c369588, infoPort=43003, infoSecurePort=0, ipcPort=40609, storageInfo=lv=-57;cid=testClusterID;nsid=883389634;c=1732282678811) 2024-11-22T13:38:00,777 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1793ce3b9cb757fd with lease ID 0xab0e331921cf8dc8: from storage DS-04a699cb-0776-410a-9b35-754524eb5a94 node DatanodeRegistration(127.0.0.1:35875, datanodeUuid=58b21bbf-e607-4a00-b1f2-21128c369588, infoPort=43003, infoSecurePort=0, ipcPort=40609, storageInfo=lv=-57;cid=testClusterID;nsid=883389634;c=1732282678811), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:38:00,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:00,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:00,884 WARN [Thread-1980 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/data/data4/current/BP-23462902-172.17.0.2-1732282678811/current, will proceed with Du for space computation calculation, 2024-11-22T13:38:00,884 WARN [Thread-1979 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/data/data3/current/BP-23462902-172.17.0.2-1732282678811/current, will proceed with Du for space computation calculation, 2024-11-22T13:38:00,905 WARN [Thread-1955 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T13:38:00,907 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2692a07b8fe3bf50 with lease ID 0xab0e331921cf8dc9: Processing first storage report for DS-a1ca4af5-f3b1-457b-9b08-313c174e33de from datanode DatanodeRegistration(127.0.0.1:44415, datanodeUuid=f68979ea-5d8e-4a31-8f4b-04833ce0c856, infoPort=43259, infoSecurePort=0, ipcPort=33093, storageInfo=lv=-57;cid=testClusterID;nsid=883389634;c=1732282678811) 2024-11-22T13:38:00,907 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2692a07b8fe3bf50 with lease ID 0xab0e331921cf8dc9: from storage DS-a1ca4af5-f3b1-457b-9b08-313c174e33de node DatanodeRegistration(127.0.0.1:44415, datanodeUuid=f68979ea-5d8e-4a31-8f4b-04833ce0c856, infoPort=43259, infoSecurePort=0, ipcPort=33093, storageInfo=lv=-57;cid=testClusterID;nsid=883389634;c=1732282678811), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:38:00,908 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2692a07b8fe3bf50 with lease ID 0xab0e331921cf8dc9: Processing first storage report for DS-4c1c0ca9-5c5d-4c39-9565-372b9442bb6b from datanode DatanodeRegistration(127.0.0.1:44415, datanodeUuid=f68979ea-5d8e-4a31-8f4b-04833ce0c856, infoPort=43259, infoSecurePort=0, ipcPort=33093, storageInfo=lv=-57;cid=testClusterID;nsid=883389634;c=1732282678811) 2024-11-22T13:38:00,908 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2692a07b8fe3bf50 with lease ID 0xab0e331921cf8dc9: from storage DS-4c1c0ca9-5c5d-4c39-9565-372b9442bb6b node DatanodeRegistration(127.0.0.1:44415, datanodeUuid=f68979ea-5d8e-4a31-8f4b-04833ce0c856, infoPort=43259, infoSecurePort=0, ipcPort=33093, storageInfo=lv=-57;cid=testClusterID;nsid=883389634;c=1732282678811), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:38:00,955 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b 2024-11-22T13:38:00,984 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/zookeeper_0, clientPort=64347, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T13:38:00,985 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64347 2024-11-22T13:38:00,985 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:00,987 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:00,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:38:00,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:38:00,997 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b with version=8 2024-11-22T13:38:00,997 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase-staging 2024-11-22T13:38:00,999 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:38:00,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:00,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:00,999 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:38:00,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:00,999 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:38:00,999 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T13:38:00,999 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:38:01,000 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35245 2024-11-22T13:38:01,001 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35245 connecting to ZooKeeper ensemble=127.0.0.1:64347 2024-11-22T13:38:01,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:352450x0, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:38:01,051 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35245-0x10162c402410000 connected 2024-11-22T13:38:01,137 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:01,141 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:01,144 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:38:01,145 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b, hbase.cluster.distributed=false 2024-11-22T13:38:01,148 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:38:01,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35245 2024-11-22T13:38:01,149 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35245 2024-11-22T13:38:01,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35245 2024-11-22T13:38:01,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35245 2024-11-22T13:38:01,150 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35245 2024-11-22T13:38:01,165 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:38:01,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:01,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:01,165 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:38:01,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:01,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:38:01,165 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T13:38:01,165 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:38:01,166 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41207 2024-11-22T13:38:01,167 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41207 connecting to ZooKeeper ensemble=127.0.0.1:64347 2024-11-22T13:38:01,167 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:01,169 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:01,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:412070x0, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:38:01,179 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41207-0x10162c402410001 connected 2024-11-22T13:38:01,179 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:38:01,179 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T13:38:01,180 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T13:38:01,180 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T13:38:01,181 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:38:01,181 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41207 2024-11-22T13:38:01,182 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41207 2024-11-22T13:38:01,182 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41207 2024-11-22T13:38:01,183 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41207 2024-11-22T13:38:01,183 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41207 2024-11-22T13:38:01,198 DEBUG [M:0;e025332d312f:35245 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e025332d312f:35245 2024-11-22T13:38:01,198 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e025332d312f,35245,1732282680999 2024-11-22T13:38:01,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:38:01,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:38:01,211 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e025332d312f,35245,1732282680999 2024-11-22T13:38:01,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T13:38:01,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,221 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T13:38:01,222 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e025332d312f,35245,1732282680999 from backup master directory 2024-11-22T13:38:01,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e025332d312f,35245,1732282680999 2024-11-22T13:38:01,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:38:01,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:38:01,231 WARN [master/e025332d312f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T13:38:01,231 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e025332d312f,35245,1732282680999 2024-11-22T13:38:01,236 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/hbase.id] with ID: 4f643f0f-d134-4f41-9065-1eb4414752da 2024-11-22T13:38:01,236 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/.tmp/hbase.id 2024-11-22T13:38:01,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:38:01,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:38:01,242 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/.tmp/hbase.id]:[hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/hbase.id] 2024-11-22T13:38:01,254 INFO [master/e025332d312f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:01,254 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T13:38:01,255 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
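[Editor's note] The records above are the standard HBase mini-cluster bootstrap for a test: HBaseTestingUtil picks a test rootdir, starts a MiniZooKeeperCluster on a random client port, brings up a mini DFS, and then registers a master (and shortly after a regionserver) in ZooKeeper. As a rough orientation only, a test that produces this kind of output is typically driven by something like the sketch below. The class name HBaseTestingUtil matches the log; the method names (startMiniCluster, createTable, shutdownMiniCluster), the table name "t1", family "cf", and the class name MiniClusterBootstrapSketch are assumptions carried over from the older HBaseTestingUtility API, not verified against this branch.

// Illustrative sketch only: a minimal bootstrap producing the kind of startup
// output recorded above (mini ZooKeeper, mini DFS, one master, one regionserver).
// Method signatures are assumed from the older HBaseTestingUtility API.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;

public class MiniClusterBootstrapSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // The utility chooses hbase.rootdir, the ZooKeeper client port and the RPC
    // ports itself, which is what the "Setting hbase.rootdir to ..." and
    // "Started MiniZooKeeperCluster ... client port=..." lines record.
    util.startMiniCluster();
    try (Table table = util.createTable(TableName.valueOf("t1"), "cf")) {
      // test body would exercise the table here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}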
2024-11-22T13:38:01,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:38:01,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:38:01,272 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:38:01,273 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T13:38:01,273 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:38:01,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:38:01,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:38:01,280 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store 2024-11-22T13:38:01,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:38:01,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:38:01,287 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:01,287 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:38:01,287 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:01,287 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:01,287 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:38:01,287 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:01,287 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
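[Editor's note] For readability, the flattened 'master:store' descriptor dumped above corresponds to four column families: 'info' with three versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks; and 'proc', 'rs', 'state' each with one version, ROW bloom filter and 64 KB blocks. The sketch below restates that descriptor with the public TableDescriptorBuilder API so the dump is easier to read; it is a reader-oriented reconstruction (values copied from the log), not the code path MasterRegion itself uses, and the class name MasterStoreDescriptorSketch is invented for illustration.

// Reconstruction of the 'master:store' table descriptor printed in the log,
// expressed with the public client API. Values mirror the dump above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor masterStore() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        // 'info': 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs', 'state': 1 version, ROW bloom, 64 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).setBlocksize(64 * 1024).build())
        .build();
  }
}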
2024-11-22T13:38:01,287 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282681287Disabling compacts and flushes for region at 1732282681287Disabling writes for close at 1732282681287Writing region close event to WAL at 1732282681287Closed at 1732282681287 2024-11-22T13:38:01,288 WARN [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/.initializing 2024-11-22T13:38:01,288 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/WALs/e025332d312f,35245,1732282680999 2024-11-22T13:38:01,291 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C35245%2C1732282680999, suffix=, logDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/WALs/e025332d312f,35245,1732282680999, archiveDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/oldWALs, maxLogs=10 2024-11-22T13:38:01,292 INFO [master/e025332d312f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C35245%2C1732282680999.1732282681292 2024-11-22T13:38:01,297 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/WALs/e025332d312f,35245,1732282680999/e025332d312f%2C35245%2C1732282680999.1732282681292 2024-11-22T13:38:01,299 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43259:43259),(127.0.0.1/127.0.0.1:43003:43003)] 2024-11-22T13:38:01,302 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:38:01,302 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:01,303 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,303 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,304 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T13:38:01,306 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:01,306 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T13:38:01,308 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:01,308 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T13:38:01,309 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,309 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:01,310 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T13:38:01,311 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,311 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:01,311 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,312 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,312 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,313 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,313 DEBUG [master/e025332d312f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,314 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T13:38:01,315 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:01,317 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:38:01,317 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858372, jitterRate=0.0914774090051651}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T13:38:01,318 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732282681303Initializing all the Stores at 1732282681304 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282681304Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282681304Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282681304Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282681304Cleaning up temporary data from old regions at 1732282681313 (+9 ms)Region opened successfully at 1732282681318 (+5 ms) 2024-11-22T13:38:01,318 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T13:38:01,321 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f6699ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:38:01,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T13:38:01,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T13:38:01,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T13:38:01,322 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T13:38:01,323 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T13:38:01,323 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T13:38:01,323 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T13:38:01,326 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T13:38:01,327 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T13:38:01,336 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T13:38:01,337 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T13:38:01,337 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T13:38:01,347 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T13:38:01,347 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T13:38:01,348 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T13:38:01,357 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T13:38:01,358 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T13:38:01,368 DEBUG 
[master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T13:38:01,370 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T13:38:01,378 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T13:38:01,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:38:01,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:38:01,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,389 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e025332d312f,35245,1732282680999, sessionid=0x10162c402410000, setting cluster-up flag (Was=false) 2024-11-22T13:38:01,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,441 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T13:38:01,443 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,35245,1732282680999 2024-11-22T13:38:01,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:01,494 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T13:38:01,495 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,35245,1732282680999 2024-11-22T13:38:01,497 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T13:38:01,498 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T13:38:01,498 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T13:38:01,498 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T13:38:01,498 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e025332d312f,35245,1732282680999 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T13:38:01,500 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:38:01,500 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:38:01,500 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:38:01,500 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:38:01,500 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e025332d312f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T13:38:01,500 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,500 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:38:01,500 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e025332d312f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T13:38:01,502 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732282711502 2024-11-22T13:38:01,502 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T13:38:01,502 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:38:01,502 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T13:38:01,502 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T13:38:01,502 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T13:38:01,502 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T13:38:01,502 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T13:38:01,502 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T13:38:01,502 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T13:38:01,503 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T13:38:01,503 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T13:38:01,503 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T13:38:01,503 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,503 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T13:38:01,504 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T13:38:01,504 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T13:38:01,504 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282681504,5,FailOnTimeoutGroup] 2024-11-22T13:38:01,504 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282681504,5,FailOnTimeoutGroup] 2024-11-22T13:38:01,504 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,504 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T13:38:01,504 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,504 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:38:01,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:38:01,510 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T13:38:01,510 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b 2024-11-22T13:38:01,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:38:01,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:38:01,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:01,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:38:01,518 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:38:01,518 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:01,518 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:38:01,519 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:38:01,519 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:01,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:38:01,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:38:01,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:01,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:38:01,522 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:38:01,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:01,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:01,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:38:01,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740 2024-11-22T13:38:01,523 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740 2024-11-22T13:38:01,524 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:38:01,525 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:38:01,525 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:38:01,526 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:38:01,528 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:38:01,528 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845417, jitterRate=0.07500375807285309}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:38:01,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732282681515Initializing all the Stores at 1732282681516 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282681516Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282681516Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282681516Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282681516Cleaning up temporary data from old regions at 1732282681525 (+9 ms)Region opened successfully at 1732282681529 (+4 ms) 2024-11-22T13:38:01,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:38:01,529 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:38:01,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:38:01,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:38:01,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:38:01,530 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:38:01,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282681529Disabling compacts and flushes for region at 
1732282681529Disabling writes for close at 1732282681529Writing region close event to WAL at 1732282681529Closed at 1732282681529 2024-11-22T13:38:01,531 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:38:01,531 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T13:38:01,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T13:38:01,533 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:38:01,534 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T13:38:01,586 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(746): ClusterId : 4f643f0f-d134-4f41-9065-1eb4414752da 2024-11-22T13:38:01,586 DEBUG [RS:0;e025332d312f:41207 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T13:38:01,601 DEBUG [RS:0;e025332d312f:41207 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T13:38:01,601 DEBUG [RS:0;e025332d312f:41207 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T13:38:01,612 DEBUG [RS:0;e025332d312f:41207 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T13:38:01,613 DEBUG [RS:0;e025332d312f:41207 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78736565, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:38:01,629 DEBUG [RS:0;e025332d312f:41207 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e025332d312f:41207 2024-11-22T13:38:01,629 INFO [RS:0;e025332d312f:41207 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T13:38:01,629 INFO [RS:0;e025332d312f:41207 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T13:38:01,629 DEBUG [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(832): About to register with Master. 
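For context on the hbase:meta table descriptor logged by FSTableDescriptors a few entries above: the attributes it prints for the 'info' family (8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL blooms, in-memory, 3 versions) map onto the public HBase client builder API. The sketch below merely rebuilds an equivalent 'info' column family descriptor for illustration (it needs hbase-client on the classpath); it is not the code path the master actually runs.

// Illustrative only: a column family descriptor with the same attributes as the
// 'info' family of hbase:meta shown in the FSTableDescriptors log entry above.
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaInfoFamilySketch {
  public static ColumnFamilyDescriptor infoFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                  // VERSIONS => '3'
        .setBlocksize(8 * 1024)                             // BLOCKSIZE => '8192 B (8KB)'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
        .setInMemory(true)                                   // IN_MEMORY => 'true'
        .build();
  }
}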
2024-11-22T13:38:01,630 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(2659): reportForDuty to master=e025332d312f,35245,1732282680999 with port=41207, startcode=1732282681164 2024-11-22T13:38:01,630 DEBUG [RS:0;e025332d312f:41207 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T13:38:01,632 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59067, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T13:38:01,632 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35245 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e025332d312f,41207,1732282681164 2024-11-22T13:38:01,632 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35245 {}] master.ServerManager(517): Registering regionserver=e025332d312f,41207,1732282681164 2024-11-22T13:38:01,634 DEBUG [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b 2024-11-22T13:38:01,634 DEBUG [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43267 2024-11-22T13:38:01,634 DEBUG [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T13:38:01,641 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:38:01,642 DEBUG [RS:0;e025332d312f:41207 {}] zookeeper.ZKUtil(111): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e025332d312f,41207,1732282681164 2024-11-22T13:38:01,642 WARN [RS:0;e025332d312f:41207 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T13:38:01,642 INFO [RS:0;e025332d312f:41207 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:38:01,642 DEBUG [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164 2024-11-22T13:38:01,643 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e025332d312f,41207,1732282681164] 2024-11-22T13:38:01,646 INFO [RS:0;e025332d312f:41207 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T13:38:01,649 INFO [RS:0;e025332d312f:41207 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T13:38:01,649 INFO [RS:0;e025332d312f:41207 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T13:38:01,649 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
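The MemStoreFlusher entry above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. Assuming the default fractions (hbase.regionserver.global.memstore.size = 0.4 of the heap, low-water mark at 0.95 of that limit), those numbers correspond to roughly a 2.2 GB heap. The heap size in the sketch below is inferred from the logged values, not read from the test configuration.

// Back-of-the-envelope check of the MemStoreFlusher limits, under the assumed default fractions.
public class MemStoreLimitSketch {
  public static void main(String[] args) {
    long heapBytes = 2200L * 1024 * 1024;   // assumed ~2.2 GB heap, inferred from the log
    double globalFraction = 0.4;            // assumed hbase.regionserver.global.memstore.size
    double lowerLimitFraction = 0.95;       // assumed lower-limit fraction of the global limit
    long upper = (long) (heapBytes * globalFraction);
    long lower = (long) (upper * lowerLimitFraction);
    // Prints 880 MB and 836 MB, matching globalMemStoreLimit / globalMemStoreLimitLowMark above.
    System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n", upper >> 20, lower >> 20);
  }
}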
2024-11-22T13:38:01,649 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T13:38:01,650 INFO [RS:0;e025332d312f:41207 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T13:38:01,650 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:38:01,651 DEBUG [RS:0;e025332d312f:41207 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:38:01,652 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
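The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" entries come from HBase's ChoreService scheduling periodic ScheduledChore tasks. The sketch below shows the general shape of that API with a made-up chore name and period; it is an illustration of the mechanism, not the region server's actual chore wiring.

// Illustrative ChoreService usage; the chore name, period, and work done are invented.
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    // Runs chore() every 1000 ms until the stopper is stopped or the service shuts down,
    // analogous to the CompactionChecker / MemstoreFlusherChore entries above.
    service.scheduleChore(new ScheduledChore("ExampleChecker", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic work");
      }
    });
    Thread.sleep(3000);
    service.shutdown();
  }
}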
2024-11-22T13:38:01,652 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,652 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,652 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,652 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,652 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41207,1732282681164-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:38:01,668 INFO [RS:0;e025332d312f:41207 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T13:38:01,669 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41207,1732282681164-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,669 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,669 INFO [RS:0;e025332d312f:41207 {}] regionserver.Replication(171): e025332d312f,41207,1732282681164 started 2024-11-22T13:38:01,681 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:01,681 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1482): Serving as e025332d312f,41207,1732282681164, RpcServer on e025332d312f/172.17.0.2:41207, sessionid=0x10162c402410001 2024-11-22T13:38:01,681 DEBUG [RS:0;e025332d312f:41207 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T13:38:01,681 DEBUG [RS:0;e025332d312f:41207 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e025332d312f,41207,1732282681164 2024-11-22T13:38:01,681 DEBUG [RS:0;e025332d312f:41207 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,41207,1732282681164' 2024-11-22T13:38:01,681 DEBUG [RS:0;e025332d312f:41207 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T13:38:01,682 DEBUG [RS:0;e025332d312f:41207 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T13:38:01,682 DEBUG [RS:0;e025332d312f:41207 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T13:38:01,682 DEBUG [RS:0;e025332d312f:41207 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T13:38:01,682 DEBUG [RS:0;e025332d312f:41207 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e025332d312f,41207,1732282681164 2024-11-22T13:38:01,682 DEBUG [RS:0;e025332d312f:41207 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,41207,1732282681164' 2024-11-22T13:38:01,683 DEBUG [RS:0;e025332d312f:41207 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T13:38:01,683 DEBUG 
[RS:0;e025332d312f:41207 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T13:38:01,683 DEBUG [RS:0;e025332d312f:41207 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T13:38:01,683 INFO [RS:0;e025332d312f:41207 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T13:38:01,683 INFO [RS:0;e025332d312f:41207 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T13:38:01,684 WARN [e025332d312f:35245 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T13:38:01,788 INFO [RS:0;e025332d312f:41207 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C41207%2C1732282681164, suffix=, logDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164, archiveDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/oldWALs, maxLogs=32 2024-11-22T13:38:01,789 INFO [RS:0;e025332d312f:41207 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41207%2C1732282681164.1732282681789 2024-11-22T13:38:01,797 INFO [RS:0;e025332d312f:41207 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282681789 2024-11-22T13:38:01,804 DEBUG [RS:0;e025332d312f:41207 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43003:43003),(127.0.0.1/127.0.0.1:43259:43259)] 2024-11-22T13:38:01,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:01,846 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:01,934 DEBUG [e025332d312f:35245 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T13:38:01,935 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:01,937 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,41207,1732282681164, state=OPENING 2024-11-22T13:38:02,000 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T13:38:02,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:02,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:02,012 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:38:02,013 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:38:02,013 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:38:02,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,41207,1732282681164}] 2024-11-22T13:38:02,169 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T13:38:02,173 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39523, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T13:38:02,182 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T13:38:02,182 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:38:02,184 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C41207%2C1732282681164.meta, suffix=.meta, logDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164, archiveDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/oldWALs, maxLogs=32 2024-11-22T13:38:02,184 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41207%2C1732282681164.meta.1732282682184.meta 2024-11-22T13:38:02,189 INFO 
[RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.meta.1732282682184.meta 2024-11-22T13:38:02,192 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43003:43003),(127.0.0.1/127.0.0.1:43259:43259)] 2024-11-22T13:38:02,193 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:38:02,193 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T13:38:02,193 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T13:38:02,193 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T13:38:02,193 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T13:38:02,193 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:02,193 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T13:38:02,194 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T13:38:02,195 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:38:02,196 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:38:02,196 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:02,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:02,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:38:02,197 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:38:02,197 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:02,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:02,197 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:38:02,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:38:02,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:02,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:02,199 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:38:02,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:38:02,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:02,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:02,200 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:38:02,200 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740 2024-11-22T13:38:02,201 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740 2024-11-22T13:38:02,202 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:38:02,202 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:38:02,203 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
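Note on the FlushLargeStoresPolicy entry above: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, the policy falls back to region.getMemStoreFlushHeapSize divided by the number of column families, which the log reports as 16.0 M. A minimal Java sketch of that arithmetic, assuming a 64 MB region flush size inferred only from the logged result and the four hbase:meta families (info, ns, rep_barrier, table):

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memStoreFlushHeapSize = 64L * 1024 * 1024; // assumption: inferred from the logged 16.0 M per family
    int familyCount = 4;                            // hbase:meta families: info, ns, rep_barrier, table
    long lowerBound = memStoreFlushHeapSize / familyCount;
    System.out.println(lowerBound);                 // prints 16777216, i.e. the logged 16.0 M
  }
}

The result, 16777216 bytes, matches the FlushLargeStoresPolicy{flushSizeLowerBound=16777216} value reported when the meta region finishes opening below.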
2024-11-22T13:38:02,204 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:38:02,205 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838385, jitterRate=0.06606283783912659}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:38:02,205 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T13:38:02,205 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732282682194Writing region info on filesystem at 1732282682194Initializing all the Stores at 1732282682194Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282682194Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282682195 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282682195Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282682195Cleaning up temporary data from old regions at 1732282682202 (+7 ms)Running coprocessor post-open hooks at 1732282682205 (+3 ms)Region opened successfully at 1732282682205 2024-11-22T13:38:02,206 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732282682168 2024-11-22T13:38:02,208 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T13:38:02,208 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T13:38:02,209 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:02,210 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,41207,1732282681164, state=OPEN 2024-11-22T13:38:02,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:38:02,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:38:02,251 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e025332d312f,41207,1732282681164 2024-11-22T13:38:02,251 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:38:02,251 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:38:02,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T13:38:02,257 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,41207,1732282681164 in 238 msec 2024-11-22T13:38:02,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T13:38:02,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 726 msec 2024-11-22T13:38:02,263 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:38:02,263 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T13:38:02,264 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:38:02,265 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,41207,1732282681164, seqNum=-1] 2024-11-22T13:38:02,265 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:38:02,266 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33107, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:38:02,272 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 773 msec 2024-11-22T13:38:02,272 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732282682272, completionTime=-1 2024-11-22T13:38:02,272 INFO 
[master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T13:38:02,272 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732282742275 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732282802275 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35245,1732282680999-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35245,1732282680999-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35245,1732282680999-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e025332d312f:35245, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:02,275 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:02,276 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:02,277 DEBUG [master/e025332d312f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T13:38:02,279 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.048sec 2024-11-22T13:38:02,279 INFO [master/e025332d312f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T13:38:02,279 INFO [master/e025332d312f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T13:38:02,279 INFO [master/e025332d312f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T13:38:02,279 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-22T13:38:02,279 INFO [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T13:38:02,279 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35245,1732282680999-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:38:02,279 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35245,1732282680999-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T13:38:02,282 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T13:38:02,282 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T13:38:02,282 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,35245,1732282680999-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:02,285 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@335c5487, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:38:02,285 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e025332d312f,35245,-1 for getting cluster id 2024-11-22T13:38:02,285 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T13:38:02,286 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4f643f0f-d134-4f41-9065-1eb4414752da' 2024-11-22T13:38:02,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T13:38:02,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4f643f0f-d134-4f41-9065-1eb4414752da" 2024-11-22T13:38:02,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10db42ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:38:02,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e025332d312f,35245,-1] 2024-11-22T13:38:02,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T13:38:02,287 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:02,288 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48534, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T13:38:02,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5958c6d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:38:02,289 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:38:02,290 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,41207,1732282681164, seqNum=-1] 2024-11-22T13:38:02,290 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:38:02,291 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52082, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:38:02,293 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e025332d312f,35245,1732282680999 2024-11-22T13:38:02,293 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:02,296 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T13:38:02,296 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T13:38:02,297 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is e025332d312f,35245,1732282680999 2024-11-22T13:38:02,297 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@14a7d87a 2024-11-22T13:38:02,297 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T13:38:02,298 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48538, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T13:38:02,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35245 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T13:38:02,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35245 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
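Note on the two TableDescriptorChecker warnings above: the flagged sizes are deliberately small test settings, hbase.hregion.max.filesize=786432 (768 KB) and hbase.hregion.memstore.flush.size=8192 (8 KB), which force frequent flushes and splits during the log-rolling test. Whether the test applies them through the table descriptor or the configuration keys named in the warnings is not visible here; the following sketch (hypothetical class name, client-side HBase API) shows the descriptor route with those exact values:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallRegionDescriptorSketch {
  public static TableDescriptor build() {
    // Values taken from the TableDescriptorChecker warnings above.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
        .setMaxFileSize(786432L)       // triggers the MAX_FILESIZE "too small" warning
        .setMemStoreFlushSize(8192L)   // triggers the MEMSTORE_FLUSHSIZE "too small" warning
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .build();
  }
}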
2024-11-22T13:38:02,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35245 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:38:02,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35245 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-22T13:38:02,302 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T13:38:02,302 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:02,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35245 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-22T13:38:02,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35245 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T13:38:02,303 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T13:38:02,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741835_1011 (size=381) 2024-11-22T13:38:02,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741835_1011 (size=381) 2024-11-22T13:38:02,311 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 113e9e9fb4ce69e600badf85506cbae7, NAME => 'TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b 2024-11-22T13:38:02,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741836_1012 (size=64) 2024-11-22T13:38:02,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741836_1012 (size=64) 2024-11-22T13:38:02,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:02,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 113e9e9fb4ce69e600badf85506cbae7, disabling compactions & flushes 2024-11-22T13:38:02,320 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:02,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:02,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. after waiting 0 ms 2024-11-22T13:38:02,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:02,320 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:02,320 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 113e9e9fb4ce69e600badf85506cbae7: Waiting for close lock at 1732282682320Disabling compacts and flushes for region at 1732282682320Disabling writes for close at 1732282682320Writing region close event to WAL at 1732282682320Closed at 1732282682320 2024-11-22T13:38:02,321 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T13:38:02,321 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732282682321"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732282682321"}]},"ts":"1732282682321"} 2024-11-22T13:38:02,324 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
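Note on the MetaTableAccessor Put above: it writes the new region's info:regioninfo and info:state cells into hbase:meta before assignment starts. A hedged sketch (assumed Connection setup, hypothetical class name) that reads those catalog cells back with the client API, using the family and qualifier shown in the Put:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaStateSketch {
  static void printRegionStates(Connection conn) throws java.io.IOException {
    // Scan the catalog table and print each region row's info:state cell,
    // the same qualifier written by the Put logged above.
    try (Table meta = conn.getTable(TableName.valueOf("hbase:meta"));
         ResultScanner scanner = meta.getScanner(new Scan())) {
      for (Result r : scanner) {
        byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
        System.out.println(Bytes.toString(r.getRow()) + " -> "
            + (state == null ? "(no state)" : Bytes.toString(state)));
      }
    }
  }
}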
2024-11-22T13:38:02,325 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T13:38:02,325 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282682325"}]},"ts":"1732282682325"} 2024-11-22T13:38:02,327 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-22T13:38:02,327 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, ASSIGN}] 2024-11-22T13:38:02,329 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, ASSIGN 2024-11-22T13:38:02,329 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, ASSIGN; state=OFFLINE, location=e025332d312f,41207,1732282681164; forceNewPlan=false, retain=false 2024-11-22T13:38:02,481 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=113e9e9fb4ce69e600badf85506cbae7, regionState=OPENING, regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:02,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, ASSIGN because future has completed 2024-11-22T13:38:02,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 113e9e9fb4ce69e600badf85506cbae7, server=e025332d312f,41207,1732282681164}] 2024-11-22T13:38:02,644 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 
2024-11-22T13:38:02,644 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 113e9e9fb4ce69e600badf85506cbae7, NAME => 'TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:38:02,645 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,645 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:02,645 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,645 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,646 INFO [StoreOpener-113e9e9fb4ce69e600badf85506cbae7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,647 INFO [StoreOpener-113e9e9fb4ce69e600badf85506cbae7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 113e9e9fb4ce69e600badf85506cbae7 columnFamilyName info 2024-11-22T13:38:02,647 DEBUG [StoreOpener-113e9e9fb4ce69e600badf85506cbae7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:02,648 INFO [StoreOpener-113e9e9fb4ce69e600badf85506cbae7-1 {}] regionserver.HStore(327): Store=113e9e9fb4ce69e600badf85506cbae7/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:02,648 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,649 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,649 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,649 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,649 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,651 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,653 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:38:02,653 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 113e9e9fb4ce69e600badf85506cbae7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=693894, jitterRate=-0.11766830086708069}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T13:38:02,653 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:02,654 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 113e9e9fb4ce69e600badf85506cbae7: Running coprocessor pre-open hook at 1732282682645Writing region info on filesystem at 1732282682645Initializing all the Stores at 1732282682646 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282682646Cleaning up temporary data from old regions at 1732282682649 (+3 ms)Running coprocessor post-open hooks at 1732282682653 (+4 ms)Region opened successfully at 1732282682654 (+1 ms) 2024-11-22T13:38:02,655 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., pid=6, masterSystemTime=1732282682641 2024-11-22T13:38:02,658 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 
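Note on the desiredMaxFileSize values logged when the two regions open (838385 for hbase:meta earlier, 693894 for TestLogRolling-testLogRolling here): both appear to be the configured hbase.hregion.max.filesize of 786432 (from the TableDescriptorChecker warning above) adjusted by the logged jitterRate, and a truncating long cast reproduces both numbers exactly. A small sketch of that arithmetic; the cast is an assumption that happens to match the output, not something stated in the log:

public class SplitSizeJitterSketch {
  // Assumed form: configured max file size plus the truncated jitter fraction.
  static long jittered(long configuredMaxFileSize, double jitterRate) {
    return configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
  }
  public static void main(String[] args) {
    System.out.println(jittered(786432L, 0.06606283783912659));   // 838385 (hbase:meta region)
    System.out.println(jittered(786432L, -0.11766830086708069));  // 693894 (TestLogRolling region)
  }
}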
2024-11-22T13:38:02,658 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:02,658 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=113e9e9fb4ce69e600badf85506cbae7, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:02,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 113e9e9fb4ce69e600badf85506cbae7, server=e025332d312f,41207,1732282681164 because future has completed 2024-11-22T13:38:02,665 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T13:38:02,665 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 113e9e9fb4ce69e600badf85506cbae7, server=e025332d312f,41207,1732282681164 in 175 msec 2024-11-22T13:38:02,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T13:38:02,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, ASSIGN in 338 msec 2024-11-22T13:38:02,669 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T13:38:02,669 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732282682669"}]},"ts":"1732282682669"} 2024-11-22T13:38:02,671 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-22T13:38:02,672 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T13:38:02,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 373 msec 2024-11-22T13:38:02,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:02,847 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:03,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,249 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,250 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,271 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,779 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T13:38:03,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,785 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,807 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,808 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,809 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,815 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:03,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:03,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:04,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:04,848 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:05,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:05,850 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:06,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:06,851 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:06,898 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-22T13:38:06,898 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T13:38:06,900 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T13:38:07,647 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T13:38:07,648 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-22T13:38:07,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:07,852 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:08,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:08,853 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:09,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:09,854 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:10,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:10,856 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:11,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:11,857 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:12,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35245 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T13:38:12,370 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-22T13:38:12,370 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-22T13:38:12,377 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-22T13:38:12,377 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:12,380 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., hostname=e025332d312f,41207,1732282681164, seqNum=2] 2024-11-22T13:38:12,405 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T13:38:12,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,443 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,447 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:12,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:12,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:38:12,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/be78ab80e3d843389b32529cffa2eed4 is 1080, key is row0001/info:/1732282692382/Put/seqid=0 2024-11-22T13:38:12,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741837_1013 (size=12509) 2024-11-22T13:38:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741837_1013 (size=12509) 2024-11-22T13:38:12,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/be78ab80e3d843389b32529cffa2eed4 2024-11-22T13:38:12,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/be78ab80e3d843389b32529cffa2eed4 as 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/be78ab80e3d843389b32529cffa2eed4 2024-11-22T13:38:12,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/be78ab80e3d843389b32529cffa2eed4, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T13:38:12,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 113e9e9fb4ce69e600badf85506cbae7 in 37ms, sequenceid=11, compaction requested=false 2024-11-22T13:38:12,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:12,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:12,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB 2024-11-22T13:38:12,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/a47ed0b9d26a4ce29032271b050c43ca is 1080, key is row0008/info:/1732282692450/Put/seqid=0 2024-11-22T13:38:12,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741838_1014 (size=27607) 2024-11-22T13:38:12,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741838_1014 (size=27607) 2024-11-22T13:38:12,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:12,858 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:12,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=35 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/a47ed0b9d26a4ce29032271b050c43ca 2024-11-22T13:38:12,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/a47ed0b9d26a4ce29032271b050c43ca as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca 2024-11-22T13:38:12,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca, entries=21, sequenceid=35, filesize=27.0 K 2024-11-22T13:38:12,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=4.20 KB/4304 for 113e9e9fb4ce69e600badf85506cbae7 in 431ms, sequenceid=35, compaction requested=false 2024-11-22T13:38:12,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:12,920 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.2 K, sizeToCheck=16.0 K 2024-11-22T13:38:12,920 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:12,920 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca because midkey is the same as first or last row 2024-11-22T13:38:13,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:13,860 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:14,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:14,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:38:14,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/0539b46d1c5b4244956ca9d867ab228d is 1080, key is row0029/info:/1732282692489/Put/seqid=0 2024-11-22T13:38:14,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741839_1015 (size=12509) 2024-11-22T13:38:14,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741839_1015 (size=12509) 2024-11-22T13:38:14,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/0539b46d1c5b4244956ca9d867ab228d 2024-11-22T13:38:14,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/0539b46d1c5b4244956ca9d867ab228d as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/0539b46d1c5b4244956ca9d867ab228d 2024-11-22T13:38:14,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/0539b46d1c5b4244956ca9d867ab228d, entries=7, sequenceid=45, filesize=12.2 K 2024-11-22T13:38:14,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 113e9e9fb4ce69e600badf85506cbae7 in 25ms, sequenceid=45, compaction requested=true 2024-11-22T13:38:14,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:14,533 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=51.4 K, sizeToCheck=16.0 K 2024-11-22T13:38:14,534 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:14,534 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca because midkey is the same as first or last row 2024-11-22T13:38:14,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 113e9e9fb4ce69e600badf85506cbae7:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-22T13:38:14,534 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:14,534 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:38:14,535 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 52625 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:38:14,535 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 113e9e9fb4ce69e600badf85506cbae7/info is initiating minor compaction (all files) 2024-11-22T13:38:14,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:14,535 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 113e9e9fb4ce69e600badf85506cbae7/info in TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:14,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T13:38:14,535 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/be78ab80e3d843389b32529cffa2eed4, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/0539b46d1c5b4244956ca9d867ab228d] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp, totalSize=51.4 K 2024-11-22T13:38:14,536 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting be78ab80e3d843389b32529cffa2eed4, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732282692382 2024-11-22T13:38:14,536 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting a47ed0b9d26a4ce29032271b050c43ca, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=35, earliestPutTs=1732282692450 2024-11-22T13:38:14,536 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0539b46d1c5b4244956ca9d867ab228d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732282692489 2024-11-22T13:38:14,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/12e9a14c717e471198dc61830e0742dd is 1080, key is row0036/info:/1732282694510/Put/seqid=0 
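As a quick consistency check on the compaction selection logged just above: the three candidate store files were reported earlier with block sizes 12509, 27607 and 12509 bytes, and 12509 + 27607 + 12509 = 52625 bytes ≈ 51.4 K, which matches both the "3 files of size 52625" chosen by the exploring compaction policy and the totalSize=51.4 K handed to the compactor.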
2024-11-22T13:38:14,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741840_1016 (size=17894) 2024-11-22T13:38:14,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741840_1016 (size=17894) 2024-11-22T13:38:14,545 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/12e9a14c717e471198dc61830e0742dd 2024-11-22T13:38:14,549 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 113e9e9fb4ce69e600badf85506cbae7#info#compaction#59 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:14,550 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/518734c9b3194fc7add514a577a9ef31 is 1080, key is row0001/info:/1732282692382/Put/seqid=0 2024-11-22T13:38:14,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/12e9a14c717e471198dc61830e0742dd as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/12e9a14c717e471198dc61830e0742dd 2024-11-22T13:38:14,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741841_1017 (size=42824) 2024-11-22T13:38:14,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741841_1017 (size=42824) 2024-11-22T13:38:14,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/12e9a14c717e471198dc61830e0742dd, entries=12, sequenceid=60, filesize=17.5 K 2024-11-22T13:38:14,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=12.61 KB/12912 for 113e9e9fb4ce69e600badf85506cbae7 in 28ms, sequenceid=60, compaction requested=false 2024-11-22T13:38:14,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:14,563 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.9 K, sizeToCheck=16.0 K 2024-11-22T13:38:14,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:14,563 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 
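The throughput line above reports a 17.96 MB/second compaction average against a 50.00 MB/second limit with zero sleeps. As a rough illustration of that kind of throttling (sleep only when the running average exceeds the cap), here is a toy sketch with a fixed limit; it is not HBase's PressureAwareThroughputController, and the chunk size in main is an arbitrary example value.

```java
// Toy throughput throttle: track bytes per elapsed time and sleep only when
// the running rate exceeds the limit, so a 17.96 MB/s job under a 50 MB/s
// cap never sleeps. Illustration only, not the real controller.
public class ThroughputThrottleSketch {
    private final double limitBytesPerSec;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    ThroughputThrottleSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    void control(long newBytes) throws InterruptedException {
        bytesWritten += newBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double rate = bytesWritten / Math.max(elapsedSec, 1e-6);
        if (rate > limitBytesPerSec) {
            // Sleep just long enough for the average rate to fall back under the limit.
            double targetElapsedSec = bytesWritten / limitBytesPerSec;
            Thread.sleep(Math.max((long) ((targetElapsedSec - elapsedSec) * 1000), 0));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024); // 50 MB/s cap
        throttle.control(64 * 1024); // one 64 KiB chunk: well under the cap, no sleep
        System.out.println("wrote one chunk without throttling");
    }
}
```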
2024-11-22T13:38:14,563 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca because midkey is the same as first or last row 2024-11-22T13:38:14,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T13:38:14,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/db60ab73838f427d833e8f293ce9db03 is 1080, key is row0048/info:/1732282694536/Put/seqid=0 2024-11-22T13:38:14,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741842_1018 (size=18987) 2024-11-22T13:38:14,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741842_1018 (size=18987) 2024-11-22T13:38:14,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/db60ab73838f427d833e8f293ce9db03 2024-11-22T13:38:14,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/db60ab73838f427d833e8f293ce9db03 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/db60ab73838f427d833e8f293ce9db03 2024-11-22T13:38:14,589 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/db60ab73838f427d833e8f293ce9db03, entries=13, sequenceid=76, filesize=18.5 K 2024-11-22T13:38:14,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for 113e9e9fb4ce69e600badf85506cbae7 in 27ms, sequenceid=76, compaction requested=false 2024-11-22T13:38:14,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:14,590 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=87.4 K, sizeToCheck=16.0 K 2024-11-22T13:38:14,590 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:14,590 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca because midkey is the same as first or last row 2024-11-22T13:38:14,861 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:14,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:14,973 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/518734c9b3194fc7add514a577a9ef31 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31 2024-11-22T13:38:14,979 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 113e9e9fb4ce69e600badf85506cbae7/info of 113e9e9fb4ce69e600badf85506cbae7 into 518734c9b3194fc7add514a577a9ef31(size=41.8 K), total size for store is 77.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
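The "total size for store is 77.8 K" in the compaction-completed line above is the compacted output (42824 bytes per blk_1073741841_1017) plus the two store files that were flushed while the compaction ran (17894 and 18987 bytes per their block reports). A quick check of that arithmetic, illustrative only:

```java
// Arithmetic check on "total size for store is 77.8 K": compacted output plus
// the two files flushed during the compaction, byte sizes from the block reports.
public class StoreSizeAfterCompaction {
    public static void main(String[] args) {
        long compactedOutput = 42_824L; // 518734c9... (~41.8 K)
        long secondFlush = 17_894L;     // 12e9a14c... (~17.5 K)
        long thirdFlush = 18_987L;      // db60ab73... (~18.5 K)
        long total = compactedOutput + secondFlush + thirdFlush;
        System.out.printf("store size = %d bytes (%.1f K)%n", total, total / 1024.0);
        // Prints: store size = 79705 bytes (77.8 K)
    }
}
```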
2024-11-22T13:38:14,979 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:14,979 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., storeName=113e9e9fb4ce69e600badf85506cbae7/info, priority=13, startTime=1732282694534; duration=0sec 2024-11-22T13:38:14,979 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-22T13:38:14,979 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:14,979 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31 because midkey is the same as first or last row 2024-11-22T13:38:14,979 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-22T13:38:14,979 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:14,979 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31 because midkey is the same as first or last row 2024-11-22T13:38:14,979 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-22T13:38:14,980 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:14,980 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31 because midkey is the same as first or last row 2024-11-22T13:38:14,980 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:14,980 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 113e9e9fb4ce69e600badf85506cbae7:info 2024-11-22T13:38:15,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:15,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:16,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:16,583 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:38:16,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/cd550584e9114f36b2f7c7aca4b5e942 is 1080, key is row0061/info:/1732282694564/Put/seqid=0 2024-11-22T13:38:16,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741843_1019 (size=12509) 2024-11-22T13:38:16,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741843_1019 (size=12509) 2024-11-22T13:38:16,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/cd550584e9114f36b2f7c7aca4b5e942 2024-11-22T13:38:16,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/cd550584e9114f36b2f7c7aca4b5e942 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/cd550584e9114f36b2f7c7aca4b5e942 2024-11-22T13:38:16,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/cd550584e9114f36b2f7c7aca4b5e942, entries=7, sequenceid=87, filesize=12.2 K 2024-11-22T13:38:16,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 113e9e9fb4ce69e600badf85506cbae7 in 26ms, sequenceid=87, compaction requested=true 2024-11-22T13:38:16,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:16,609 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=90.1 K, sizeToCheck=16.0 K 2024-11-22T13:38:16,609 DEBUG [MemStoreFlusher.0 {}] 
regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:16,609 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31 because midkey is the same as first or last row 2024-11-22T13:38:16,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 113e9e9fb4ce69e600badf85506cbae7:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:16,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:16,610 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T13:38:16,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:16,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T13:38:16,611 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 92214 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T13:38:16,611 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 113e9e9fb4ce69e600badf85506cbae7/info is initiating minor compaction (all files) 2024-11-22T13:38:16,611 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 113e9e9fb4ce69e600badf85506cbae7/info in TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 
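The split-policy lines just above make two separate decisions: the size test ("Should split because region size is big enough sumSize=90.1 K, sizeToCheck=16.0 K") and the split-point guard ("cannot split ... because midkey is the same as first or last row"). The sketch below only mirrors those logged outcomes; it is not the actual ConstantSizeRegionSplitPolicy/StoreUtils code, the byte values are approximations of the logged K figures, and the row keys are placeholders apart from row0001, which does appear in this test.

```java
// Minimal sketch of the two checks logged by the flusher: size threshold and
// midkey-vs-region-boundary guard. Illustration of the logged decisions only.
import java.util.Arrays;

public class SplitDecisionSketch {
    static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
        return storeSizeBytes > sizeToCheckBytes;
    }

    static boolean midkeyUsable(byte[] midkey, byte[] firstRow, byte[] lastRow) {
        // A split point equal to the first or last row would leave one daughter empty.
        return !Arrays.equals(midkey, firstRow) && !Arrays.equals(midkey, lastRow);
    }

    public static void main(String[] args) {
        System.out.println("should split: " + shouldSplit(92_262L, 16_384L)); // ~90.1 K vs 16.0 K
        System.out.println("midkey usable: " + midkeyUsable(
            "row0001".getBytes(), "row0001".getBytes(), "row0093".getBytes()));
        // Prints: should split: true / midkey usable: false
    }
}
```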
2024-11-22T13:38:16,611 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/12e9a14c717e471198dc61830e0742dd, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/db60ab73838f427d833e8f293ce9db03, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/cd550584e9114f36b2f7c7aca4b5e942] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp, totalSize=90.1 K 2024-11-22T13:38:16,612 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 518734c9b3194fc7add514a577a9ef31, keycount=35, bloomtype=ROW, size=41.8 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732282692382 2024-11-22T13:38:16,612 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 12e9a14c717e471198dc61830e0742dd, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1732282694510 2024-11-22T13:38:16,612 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting db60ab73838f427d833e8f293ce9db03, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732282694536 2024-11-22T13:38:16,613 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd550584e9114f36b2f7c7aca4b5e942, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732282694564 2024-11-22T13:38:16,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/28ece6d8b8dd458a90ac58143568f0fb is 1080, key is row0068/info:/1732282696585/Put/seqid=0 2024-11-22T13:38:16,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741844_1020 (size=17894) 2024-11-22T13:38:16,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741844_1020 (size=17894) 2024-11-22T13:38:16,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/28ece6d8b8dd458a90ac58143568f0fb 2024-11-22T13:38:16,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/28ece6d8b8dd458a90ac58143568f0fb as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/28ece6d8b8dd458a90ac58143568f0fb 2024-11-22T13:38:16,629 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 113e9e9fb4ce69e600badf85506cbae7#info#compaction#63 average throughput is 22.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:16,629 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/589ce21507ca42f8af4dd026705da313 is 1080, key is row0001/info:/1732282692382/Put/seqid=0 2024-11-22T13:38:16,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/28ece6d8b8dd458a90ac58143568f0fb, entries=12, sequenceid=102, filesize=17.5 K 2024-11-22T13:38:16,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 113e9e9fb4ce69e600badf85506cbae7 in 27ms, sequenceid=102, compaction requested=false 2024-11-22T13:38:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=107.5 K, sizeToCheck=16.0 K 2024-11-22T13:38:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31 because midkey is the same as first or last row 2024-11-22T13:38:16,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:16,639 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T13:38:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741845_1021 (size=77566) 2024-11-22T13:38:16,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741845_1021 (size=77566) 2024-11-22T13:38:16,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/6e8d6afd83b747a582023f5fa3ddf751 is 1080, 
key is row0080/info:/1732282696611/Put/seqid=0 2024-11-22T13:38:16,650 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/589ce21507ca42f8af4dd026705da313 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313 2024-11-22T13:38:16,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741846_1022 (size=18987) 2024-11-22T13:38:16,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741846_1022 (size=18987) 2024-11-22T13:38:16,656 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 113e9e9fb4ce69e600badf85506cbae7/info of 113e9e9fb4ce69e600badf85506cbae7 into 589ce21507ca42f8af4dd026705da313(size=75.7 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T13:38:16,657 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:16,657 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., storeName=113e9e9fb4ce69e600badf85506cbae7/info, priority=12, startTime=1732282696609; duration=0sec 2024-11-22T13:38:16,657 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-22T13:38:16,657 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:16,657 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-22T13:38:16,657 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:16,657 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-11-22T13:38:16,657 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T13:38:16,658 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:16,658 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:16,658 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 
113e9e9fb4ce69e600badf85506cbae7:info 2024-11-22T13:38:16,659 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35245 {}] assignment.AssignmentManager(1363): Split request from e025332d312f,41207,1732282681164, parent={ENCODED => 113e9e9fb4ce69e600badf85506cbae7, NAME => 'TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-22T13:38:16,663 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35245 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=e025332d312f,41207,1732282681164 2024-11-22T13:38:16,666 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35245 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=113e9e9fb4ce69e600badf85506cbae7, daughterA=4cad7f0310a0b729b7e6f35cdf635d8f, daughterB=91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:16,667 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=113e9e9fb4ce69e600badf85506cbae7, daughterA=4cad7f0310a0b729b7e6f35cdf635d8f, daughterB=91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:16,667 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=113e9e9fb4ce69e600badf85506cbae7, daughterA=4cad7f0310a0b729b7e6f35cdf635d8f, daughterB=91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:16,667 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=113e9e9fb4ce69e600badf85506cbae7, daughterA=4cad7f0310a0b729b7e6f35cdf635d8f, daughterB=91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:16,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, UNASSIGN}] 2024-11-22T13:38:16,675 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, UNASSIGN 2024-11-22T13:38:16,676 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=113e9e9fb4ce69e600badf85506cbae7, regionState=CLOSING, regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:16,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, UNASSIGN because future has completed 2024-11-22T13:38:16,679 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-22T13:38:16,679 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, 
state=RUNNABLE, hasLock=false; CloseRegionProcedure 113e9e9fb4ce69e600badf85506cbae7, server=e025332d312f,41207,1732282681164}] 2024-11-22T13:38:16,840 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:16,840 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-22T13:38:16,841 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 113e9e9fb4ce69e600badf85506cbae7, disabling compactions & flushes 2024-11-22T13:38:16,841 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1993): waiting for 0 compactions & cache flush to complete for region TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:16,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:16,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:17,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/6e8d6afd83b747a582023f5fa3ddf751 2024-11-22T13:38:17,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/6e8d6afd83b747a582023f5fa3ddf751 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/6e8d6afd83b747a582023f5fa3ddf751 2024-11-22T13:38:17,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/6e8d6afd83b747a582023f5fa3ddf751, entries=13, sequenceid=118, filesize=18.5 K 2024-11-22T13:38:17,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=4.20 KB/4304 for 113e9e9fb4ce69e600badf85506cbae7 in 438ms, sequenceid=118, compaction requested=true 2024-11-22T13:38:17,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 113e9e9fb4ce69e600badf85506cbae7: 2024-11-22T13:38:17,078 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:17,078 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:17,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 113e9e9fb4ce69e600badf85506cbae7:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:17,078 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. after waiting 0 ms 2024-11-22T13:38:17,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:17,078 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:17,078 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 
because compaction request was cancelled 2024-11-22T13:38:17,078 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 113e9e9fb4ce69e600badf85506cbae7:info 2024-11-22T13:38:17,078 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 113e9e9fb4ce69e600badf85506cbae7 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-22T13:38:17,083 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/838990b953be497aa7ff7f09407854b0 is 1080, key is row0093/info:/1732282696640/Put/seqid=0 2024-11-22T13:38:17,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741847_1023 (size=9270) 2024-11-22T13:38:17,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741847_1023 (size=9270) 2024-11-22T13:38:17,088 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/838990b953be497aa7ff7f09407854b0 2024-11-22T13:38:17,093 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/.tmp/info/838990b953be497aa7ff7f09407854b0 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/838990b953be497aa7ff7f09407854b0 2024-11-22T13:38:17,098 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/838990b953be497aa7ff7f09407854b0, entries=4, sequenceid=126, filesize=9.1 K 2024-11-22T13:38:17,099 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 113e9e9fb4ce69e600badf85506cbae7 in 21ms, sequenceid=126, compaction requested=true 2024-11-22T13:38:17,100 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/be78ab80e3d843389b32529cffa2eed4, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca, 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/0539b46d1c5b4244956ca9d867ab228d, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/12e9a14c717e471198dc61830e0742dd, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/db60ab73838f427d833e8f293ce9db03, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/cd550584e9114f36b2f7c7aca4b5e942] to archive 2024-11-22T13:38:17,101 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T13:38:17,103 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/be78ab80e3d843389b32529cffa2eed4 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/be78ab80e3d843389b32529cffa2eed4 2024-11-22T13:38:17,104 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/a47ed0b9d26a4ce29032271b050c43ca 2024-11-22T13:38:17,105 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/518734c9b3194fc7add514a577a9ef31 2024-11-22T13:38:17,106 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/0539b46d1c5b4244956ca9d867ab228d to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/0539b46d1c5b4244956ca9d867ab228d 
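Each HFileArchiver line above moves a compacted store file from .../data/default/&lt;table&gt;/&lt;region&gt;/&lt;family&gt;/&lt;hfile&gt; to the same relative path under .../archive/. The sketch below reproduces that source-to-destination mapping as a plain string rewrite, which is all the logged pairs show; the real archiver operates on FileSystem paths rather than strings.

```java
// Path rewrite visible in the HFileArchiver lines: swap the "<root>/data/"
// prefix for "<root>/archive/data/" while keeping the rest of the path.
public class ArchivePathSketch {
    static String toArchivePath(String rootDir, String storeFilePath) {
        return storeFilePath.replace(rootDir + "/data/", rootDir + "/archive/data/");
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b";
        String src = root + "/data/default/TestLogRolling-testLogRolling/"
            + "113e9e9fb4ce69e600badf85506cbae7/info/be78ab80e3d843389b32529cffa2eed4";
        System.out.println(toArchivePath(root, src));
        // Prints the archive/... destination shown for be78ab80... above.
    }
}
```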
2024-11-22T13:38:17,107 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/12e9a14c717e471198dc61830e0742dd to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/12e9a14c717e471198dc61830e0742dd 2024-11-22T13:38:17,108 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/db60ab73838f427d833e8f293ce9db03 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/db60ab73838f427d833e8f293ce9db03 2024-11-22T13:38:17,109 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/cd550584e9114f36b2f7c7aca4b5e942 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/cd550584e9114f36b2f7c7aca4b5e942 2024-11-22T13:38:17,115 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=1 2024-11-22T13:38:17,116 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 2024-11-22T13:38:17,116 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 113e9e9fb4ce69e600badf85506cbae7: Waiting for close lock at 1732282696841Running coprocessor pre-close hooks at 1732282696841Disabling compacts and flushes for region at 1732282696841Disabling writes for close at 1732282697078 (+237 ms)Obtaining lock to block concurrent updates at 1732282697078Preparing flush snapshotting stores in 113e9e9fb4ce69e600badf85506cbae7 at 1732282697078Finished memstore snapshotting TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., syncing WAL and waiting on mvcc, flushsize=dataSize=4304, getHeapSize=4848, getOffHeapSize=0, getCellsCount=4 at 1732282697078Flushing stores of TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 
at 1732282697079 (+1 ms)Flushing 113e9e9fb4ce69e600badf85506cbae7/info: creating writer at 1732282697080 (+1 ms)Flushing 113e9e9fb4ce69e600badf85506cbae7/info: appending metadata at 1732282697082 (+2 ms)Flushing 113e9e9fb4ce69e600badf85506cbae7/info: closing flushed file at 1732282697082Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18fa13d5: reopening flushed file at 1732282697093 (+11 ms)Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 113e9e9fb4ce69e600badf85506cbae7 in 21ms, sequenceid=126, compaction requested=true at 1732282697099 (+6 ms)Writing region close event to WAL at 1732282697111 (+12 ms)Running coprocessor post-close hooks at 1732282697116 (+5 ms)Closed at 1732282697116 2024-11-22T13:38:17,118 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,119 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=113e9e9fb4ce69e600badf85506cbae7, regionState=CLOSED 2024-11-22T13:38:17,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 113e9e9fb4ce69e600badf85506cbae7, server=e025332d312f,41207,1732282681164 because future has completed 2024-11-22T13:38:17,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-22T13:38:17,124 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 113e9e9fb4ce69e600badf85506cbae7, server=e025332d312f,41207,1732282681164 in 443 msec 2024-11-22T13:38:17,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-22T13:38:17,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=113e9e9fb4ce69e600badf85506cbae7, UNASSIGN in 450 msec 2024-11-22T13:38:17,133 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:17,136 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=113e9e9fb4ce69e600badf85506cbae7, threads=4 2024-11-22T13:38:17,138 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/6e8d6afd83b747a582023f5fa3ddf751 for region: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,138 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/838990b953be497aa7ff7f09407854b0 for region: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,138 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313 for region: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,138 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/28ece6d8b8dd458a90ac58143568f0fb for region: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,146 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/6e8d6afd83b747a582023f5fa3ddf751, top=true 2024-11-22T13:38:17,147 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/28ece6d8b8dd458a90ac58143568f0fb, top=true 2024-11-22T13:38:17,147 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/838990b953be497aa7ff7f09407854b0, top=true 2024-11-22T13:38:17,155 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb for child: 91ec716bc6d7903852bfd20857daaa93, parent: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,155 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-6e8d6afd83b747a582023f5fa3ddf751 for child: 91ec716bc6d7903852bfd20857daaa93, parent: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,155 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-838990b953be497aa7ff7f09407854b0 for child: 91ec716bc6d7903852bfd20857daaa93, parent: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,155 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/28ece6d8b8dd458a90ac58143568f0fb for region: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,155 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/6e8d6afd83b747a582023f5fa3ddf751 for region: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,155 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/838990b953be497aa7ff7f09407854b0 for region: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741848_1024 (size=27) 2024-11-22T13:38:17,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741848_1024 (size=27) 2024-11-22T13:38:17,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741849_1025 (size=27) 2024-11-22T13:38:17,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741849_1025 (size=27) 2024-11-22T13:38:17,167 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313 for region: 113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:17,169 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 113e9e9fb4ce69e600badf85506cbae7 Daughter A: [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7] storefiles, Daughter B: [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-6e8d6afd83b747a582023f5fa3ddf751, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-838990b953be497aa7ff7f09407854b0] storefiles. 
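The "split storefiles" summary above shows the two naming schemes used for the daughters' files: daughter A gets a reference file named "<hfile>.<parent encoded region>" (589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7), while daughter B's files created via HFileLink are named "<table>=<parent encoded region>-<hfile>". A small sketch of just the name composition, leaving out the real Reference/HFileLink classes, which also record which half of the parent file is exposed:

    public class SplitNamingSketch {
        // Reference file in a daughter region: "<hfileName>.<parentEncodedRegionName>",
        // e.g. 589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7
        static String referenceFileName(String hfileName, String parentEncodedName) {
            return hfileName + "." + parentEncodedName;
        }

        // HFileLink in a daughter region: "<tableName>=<parentEncodedRegionName>-<hfileName>",
        // e.g. TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb
        static String hfileLinkName(String tableName, String parentEncodedName, String hfileName) {
            return tableName + "=" + parentEncodedName + "-" + hfileName;
        }
    }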
2024-11-22T13:38:17,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741850_1026 (size=71) 2024-11-22T13:38:17,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741850_1026 (size=71) 2024-11-22T13:38:17,179 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:17,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741851_1027 (size=71) 2024-11-22T13:38:17,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741851_1027 (size=71) 2024-11-22T13:38:17,193 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:17,204 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=-1 2024-11-22T13:38:17,206 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=-1 2024-11-22T13:38:17,209 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732282697209"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732282697209"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732282697209"}]},"ts":"1732282697209"} 2024-11-22T13:38:17,209 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732282697209"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732282697209"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732282697209"}]},"ts":"1732282697209"} 2024-11-22T13:38:17,209 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732282697209"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732282697209"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732282697209"}]},"ts":"1732282697209"} 2024-11-22T13:38:17,226 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4cad7f0310a0b729b7e6f35cdf635d8f, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=91ec716bc6d7903852bfd20857daaa93, ASSIGN}] 2024-11-22T13:38:17,227 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4cad7f0310a0b729b7e6f35cdf635d8f, ASSIGN 2024-11-22T13:38:17,227 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=91ec716bc6d7903852bfd20857daaa93, ASSIGN 2024-11-22T13:38:17,228 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4cad7f0310a0b729b7e6f35cdf635d8f, ASSIGN; state=SPLITTING_NEW, location=e025332d312f,41207,1732282681164; forceNewPlan=false, retain=false 2024-11-22T13:38:17,228 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=91ec716bc6d7903852bfd20857daaa93, ASSIGN; state=SPLITTING_NEW, location=e025332d312f,41207,1732282681164; forceNewPlan=false, retain=false 2024-11-22T13:38:17,379 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=91ec716bc6d7903852bfd20857daaa93, regionState=OPENING, regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:17,379 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4cad7f0310a0b729b7e6f35cdf635d8f, regionState=OPENING, regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:17,381 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4cad7f0310a0b729b7e6f35cdf635d8f, ASSIGN because future has completed 2024-11-22T13:38:17,382 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4cad7f0310a0b729b7e6f35cdf635d8f, server=e025332d312f,41207,1732282681164}] 2024-11-22T13:38:17,382 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=91ec716bc6d7903852bfd20857daaa93, ASSIGN because future has completed 2024-11-22T13:38:17,527 INFO [FSHLog-0-hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData-prefix:e025332d312f,35245,1732282680999 {}] wal.AbstractFSWAL(1368): Slow sync cost: 144 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44415,DS-a1ca4af5-f3b1-457b-9b08-313c174e33de,DISK], DatanodeInfoWithStorage[127.0.0.1:35875,DS-c9470bcc-b35e-4f3d-9f83-60c44edf15f6,DISK]] 2024-11-22T13:38:17,528 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91ec716bc6d7903852bfd20857daaa93, server=e025332d312f,41207,1732282681164}] 
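The pid/ppid bookkeeping running through these entries (SplitTableRegionProcedure pid=7 spawning the ASSIGN procedures pid=10/11, which in turn spawn OpenRegionProcedure children, with "Finished subprocedure ... resume processing ppid=..." once a child completes) follows the usual parent-waits-for-children pattern. A toy model of that accounting, with hypothetical submitChildren/finish helpers and none of the real ProcedureExecutor's persistence or locking:

    import java.util.*;

    public class ProcedureSketch {
        // Tiny model of the pid/ppid bookkeeping seen in the log: a parent
        // procedure stays suspended until all of its children report SUCCESS.
        static final Map<Long, Long> parentOf = new HashMap<>();
        static final Map<Long, Integer> pendingChildren = new HashMap<>();
        static final Deque<Long> runnable = new ArrayDeque<>();

        static void submitChildren(long parentPid, long... childPids) {
            pendingChildren.put(parentPid, childPids.length);
            for (long child : childPids) {
                parentOf.put(child, parentPid);
                runnable.add(child);            // children run; the parent waits
            }
        }

        static void finish(long pid) {
            Long parent = parentOf.get(pid);
            if (parent == null) return;
            int left = pendingChildren.merge(parent, -1, Integer::sum);
            if (left == 0) {
                runnable.add(parent);           // "resume processing ppid=..."
            }
        }
    }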
2024-11-22T13:38:17,687 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 2024-11-22T13:38:17,687 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 4cad7f0310a0b729b7e6f35cdf635d8f, NAME => 'TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-22T13:38:17,688 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,688 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:17,688 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,688 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,691 INFO [StoreOpener-4cad7f0310a0b729b7e6f35cdf635d8f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,693 INFO [StoreOpener-4cad7f0310a0b729b7e6f35cdf635d8f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4cad7f0310a0b729b7e6f35cdf635d8f columnFamilyName info 2024-11-22T13:38:17,693 DEBUG [StoreOpener-4cad7f0310a0b729b7e6f35cdf635d8f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:17,704 DEBUG [StoreOpener-4cad7f0310a0b729b7e6f35cdf635d8f-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7->hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313-bottom 
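The "-bottom" suffix on the loaded path above is the key detail: daughter A (STARTKEY '' to ENDKEY 'row0062') does not get a copy of the parent's 589ce... hfile, it gets a reference that exposes only the keys below the split row, while daughter B reads the "-top" half of the same file. A simplified, in-memory illustration of that half-file filtering, assuming the keys are plain sorted strings rather than real cells:

    import java.util.*;

    public class HalfStoreFileSketch {
        // A reference file does not copy data: it points at the parent hfile and
        // exposes only the keys on one side of the split row ("bottom" = keys
        // before the split row, "top" = keys at or after it).
        static List<String> readHalf(List<String> sortedParentKeys, String splitRow, boolean top) {
            List<String> half = new ArrayList<>();
            for (String key : sortedParentKeys) {
                boolean isTop = key.compareTo(splitRow) >= 0;
                if (isTop == top) {
                    half.add(key);
                }
            }
            return half;
        }

        public static void main(String[] args) {
            List<String> parent = List.of("row0001", "row0033", "row0062", "row0090");
            System.out.println(readHalf(parent, "row0062", false)); // bottom: [row0001, row0033]
            System.out.println(readHalf(parent, "row0062", true));  // top:    [row0062, row0090]
        }
    }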
2024-11-22T13:38:17,705 INFO [StoreOpener-4cad7f0310a0b729b7e6f35cdf635d8f-1 {}] regionserver.HStore(327): Store=4cad7f0310a0b729b7e6f35cdf635d8f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:17,705 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,706 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,707 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,707 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,707 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,709 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,709 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 4cad7f0310a0b729b7e6f35cdf635d8f; next sequenceid=130; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=822605, jitterRate=0.04599687457084656}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T13:38:17,710 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:17,710 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 4cad7f0310a0b729b7e6f35cdf635d8f: Running coprocessor pre-open hook at 1732282697689Writing region info on filesystem at 1732282697689Initializing all the Stores at 1732282697690 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282697690Cleaning up temporary data from old regions at 1732282697707 (+17 ms)Running coprocessor post-open hooks at 1732282697710 (+3 ms)Region opened successfully at 1732282697710 2024-11-22T13:38:17,711 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for 
TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f., pid=12, masterSystemTime=1732282697679 2024-11-22T13:38:17,711 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 4cad7f0310a0b729b7e6f35cdf635d8f:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:17,711 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:17,711 DEBUG [RS:0;e025332d312f:41207-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T13:38:17,712 INFO [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 2024-11-22T13:38:17,712 DEBUG [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.HStore(1541): 4cad7f0310a0b729b7e6f35cdf635d8f/info is initiating minor compaction (all files) 2024-11-22T13:38:17,712 INFO [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 4cad7f0310a0b729b7e6f35cdf635d8f/info in TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 2024-11-22T13:38:17,712 INFO [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7->hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313-bottom] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/.tmp, totalSize=75.7 K 2024-11-22T13:38:17,713 DEBUG [RS:0;e025332d312f:41207-longCompactions-0 {}] compactions.Compactor(225): Compacting 589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732282692382 2024-11-22T13:38:17,713 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 2024-11-22T13:38:17,713 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 2024-11-22T13:38:17,713 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 
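The two priority values printed above are worth decoding: the post-split compaction request is enqueued with priority=-2147483648 (Integer.MIN_VALUE), and for a recently split daughter the store keeps it at -2147482648, which is MIN_VALUE + 1000, so the request stays far ahead of ordinary compactions without sitting at the very bottom of the int range. A sketch of just that adjustment, inferred from the two logged values rather than taken from the HStore source:

    public class CompactionPrioritySketch {
        // priority=-2147483648 (Integer.MIN_VALUE) is logged for the post-split
        // request; the daughter keeps it at -2147482648 = MIN_VALUE + 1000.
        static int prioritizeDaughterCompaction(int requestedPriority, boolean recentlySplitDaughter) {
            if (recentlySplitDaughter && requestedPriority == Integer.MIN_VALUE) {
                return Integer.MIN_VALUE + 1000;   // -2147482648, matching the log line
            }
            return requestedPriority;
        }

        public static void main(String[] args) {
            System.out.println(prioritizeDaughterCompaction(Integer.MIN_VALUE, true)); // -2147482648
        }
    }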
2024-11-22T13:38:17,714 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 91ec716bc6d7903852bfd20857daaa93, NAME => 'TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-22T13:38:17,714 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,714 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:17,714 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=4cad7f0310a0b729b7e6f35cdf635d8f, regionState=OPEN, openSeqNum=130, regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:17,714 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,714 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,716 INFO [StoreOpener-91ec716bc6d7903852bfd20857daaa93-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,716 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-22T13:38:17,716 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
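The FlushAllLargeStoresPolicy line above states the fallback directly: flush only the column families whose memstore is above the configured lower bound, and if none qualify (as here for hbase:meta, which then flushes 4/4 families), flush them all. A compact sketch of that selection, with the bound passed in as a parameter since the configured value is not shown at this point in the log:

    import java.util.*;

    public class FlushPolicySketch {
        // Mirrors "Since none of the CFs were above the size, flushing all.":
        // pick the families above the lower bound, else fall back to every family.
        static Set<String> selectFamiliesToFlush(Map<String, Long> memstoreSizeByFamily,
                                                 long flushSizeLowerBound) {
            Set<String> selected = new HashSet<>();
            for (Map.Entry<String, Long> e : memstoreSizeByFamily.entrySet()) {
                if (e.getValue() > flushSizeLowerBound) {
                    selected.add(e.getKey());
                }
            }
            return selected.isEmpty() ? memstoreSizeByFamily.keySet() : selected;
        }
    }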
2024-11-22T13:38:17,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-22T13:38:17,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4cad7f0310a0b729b7e6f35cdf635d8f, server=e025332d312f,41207,1732282681164 because future has completed 2024-11-22T13:38:17,716 INFO [StoreOpener-91ec716bc6d7903852bfd20857daaa93-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 91ec716bc6d7903852bfd20857daaa93 columnFamilyName info 2024-11-22T13:38:17,717 DEBUG [StoreOpener-91ec716bc6d7903852bfd20857daaa93-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:17,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-22T13:38:17,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 4cad7f0310a0b729b7e6f35cdf635d8f, server=e025332d312f,41207,1732282681164 in 336 msec 2024-11-22T13:38:17,722 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=4cad7f0310a0b729b7e6f35cdf635d8f, ASSIGN in 495 msec 2024-11-22T13:38:17,726 DEBUG [StoreOpener-91ec716bc6d7903852bfd20857daaa93-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7->hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313-top 2024-11-22T13:38:17,731 DEBUG [StoreOpener-91ec716bc6d7903852bfd20857daaa93-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb 2024-11-22T13:38:17,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/info/2c184c4e299e494d9b87fd16c1adb7da is 193, key is TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93./info:regioninfo/1732282697379/Put/seqid=0 2024-11-22T13:38:17,734 INFO 
[RS:0;e025332d312f:41207-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 4cad7f0310a0b729b7e6f35cdf635d8f#info#compaction#66 average throughput is 15.65 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:17,734 DEBUG [RS:0;e025332d312f:41207-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/.tmp/info/4016d8bff2304b51a66aca4ee7ff197c is 1080, key is row0001/info:/1732282692382/Put/seqid=0 2024-11-22T13:38:17,737 DEBUG [StoreOpener-91ec716bc6d7903852bfd20857daaa93-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-6e8d6afd83b747a582023f5fa3ddf751 2024-11-22T13:38:17,741 DEBUG [StoreOpener-91ec716bc6d7903852bfd20857daaa93-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-838990b953be497aa7ff7f09407854b0 2024-11-22T13:38:17,741 INFO [StoreOpener-91ec716bc6d7903852bfd20857daaa93-1 {}] regionserver.HStore(327): Store=91ec716bc6d7903852bfd20857daaa93/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:17,741 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,742 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,743 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,743 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,744 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,745 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741852_1028 (size=9847) 2024-11-22T13:38:17,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741852_1028 
(size=9847) 2024-11-22T13:38:17,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741853_1029 (size=70862) 2024-11-22T13:38:17,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741853_1029 (size=70862) 2024-11-22T13:38:17,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/info/2c184c4e299e494d9b87fd16c1adb7da 2024-11-22T13:38:17,747 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 91ec716bc6d7903852bfd20857daaa93; next sequenceid=130; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863580, jitterRate=0.09809915721416473}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T13:38:17,747 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:17,747 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 91ec716bc6d7903852bfd20857daaa93: Running coprocessor pre-open hook at 1732282697714Writing region info on filesystem at 1732282697714Initializing all the Stores at 1732282697715 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282697715Cleaning up temporary data from old regions at 1732282697744 (+29 ms)Running coprocessor post-open hooks at 1732282697747 (+3 ms)Region opened successfully at 1732282697747 2024-11-22T13:38:17,748 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., pid=13, masterSystemTime=1732282697679 2024-11-22T13:38:17,748 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 91ec716bc6d7903852bfd20857daaa93:info, priority=-2147483648, current under compaction store size is 2 2024-11-22T13:38:17,748 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T13:38:17,748 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:17,751 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region 
TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:17,751 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 91ec716bc6d7903852bfd20857daaa93/info is initiating minor compaction (all files) 2024-11-22T13:38:17,751 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91ec716bc6d7903852bfd20857daaa93/info in TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:17,751 DEBUG [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:17,751 INFO [RS_OPEN_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:17,752 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7->hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313-top, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-6e8d6afd83b747a582023f5fa3ddf751, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-838990b953be497aa7ff7f09407854b0] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp, totalSize=120.8 K 2024-11-22T13:38:17,752 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=91ec716bc6d7903852bfd20857daaa93, regionState=OPEN, openSeqNum=130, regionLocation=e025332d312f,41207,1732282681164 2024-11-22T13:38:17,752 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7, keycount=33, bloomtype=ROW, size=75.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732282692382 2024-11-22T13:38:17,753 DEBUG [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/.tmp/info/4016d8bff2304b51a66aca4ee7ff197c as 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/info/4016d8bff2304b51a66aca4ee7ff197c 2024-11-22T13:38:17,753 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1732282696585 2024-11-22T13:38:17,754 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-6e8d6afd83b747a582023f5fa3ddf751, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732282696611 2024-11-22T13:38:17,754 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91ec716bc6d7903852bfd20857daaa93, server=e025332d312f,41207,1732282681164 because future has completed 2024-11-22T13:38:17,755 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-838990b953be497aa7ff7f09407854b0, keycount=4, bloomtype=ROW, size=9.1 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732282696640 2024-11-22T13:38:17,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-22T13:38:17,759 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 91ec716bc6d7903852bfd20857daaa93, server=e025332d312f,41207,1732282681164 in 228 msec 2024-11-22T13:38:17,761 INFO [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 4cad7f0310a0b729b7e6f35cdf635d8f/info of 4cad7f0310a0b729b7e6f35cdf635d8f into 4016d8bff2304b51a66aca4ee7ff197c(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
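The throughput lines surrounding these compactions ("average throughput is ... MB/second, slept 0 time(s) ..., total limit is 50.00 MB/second") come from pressure-aware throttling: the compactor periodically reports how many bytes it has written and sleeps whenever the observed rate would exceed the limit. A rough, self-contained model of that control loop; the real controller also raises or lowers the limit in response to flush and memstore pressure:

    public class ThroughputThrottleSketch {
        private final double maxBytesPerSecond;
        private long bytesWritten;
        private final long startNanos = System.nanoTime();
        private long totalSleptMs;

        ThroughputThrottleSketch(double maxBytesPerSecond) {
            this.maxBytesPerSecond = maxBytesPerSecond;
        }

        // Called by the writer after each chunk; sleeps only when the cumulative
        // rate has crept above the limit, otherwise "slept 0 time(s)" as in the log.
        void control(long deltaBytes) throws InterruptedException {
            bytesWritten += deltaBytes;
            double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
            double minimumSeconds = bytesWritten / maxBytesPerSecond;
            long sleepMs = (long) ((minimumSeconds - elapsedSeconds) * 1000);
            if (sleepMs > 0) {
                totalSleptMs += sleepMs;       // reported as "total slept time is M ms"
                Thread.sleep(sleepMs);
            }
        }
    }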
2024-11-22T13:38:17,761 DEBUG [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 4cad7f0310a0b729b7e6f35cdf635d8f: 2024-11-22T13:38:17,761 INFO [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f., storeName=4cad7f0310a0b729b7e6f35cdf635d8f/info, priority=15, startTime=1732282697711; duration=0sec 2024-11-22T13:38:17,761 DEBUG [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:17,761 DEBUG [RS:0;e025332d312f:41207-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 4cad7f0310a0b729b7e6f35cdf635d8f:info 2024-11-22T13:38:17,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-22T13:38:17,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=91ec716bc6d7903852bfd20857daaa93, ASSIGN in 534 msec 2024-11-22T13:38:17,765 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=113e9e9fb4ce69e600badf85506cbae7, daughterA=4cad7f0310a0b729b7e6f35cdf635d8f, daughterB=91ec716bc6d7903852bfd20857daaa93 in 1.1000 sec 2024-11-22T13:38:17,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/ns/066fe0e81b954096b76b474e15d8dc6a is 43, key is default/ns:d/1732282682267/Put/seqid=0 2024-11-22T13:38:17,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741854_1030 (size=5153) 2024-11-22T13:38:17,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741854_1030 (size=5153) 2024-11-22T13:38:17,785 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91ec716bc6d7903852bfd20857daaa93#info#compaction#69 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:17,786 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/87ae5d1637384b3ca5cd55aca0d2fed4 is 1080, key is row0062/info:/1732282694566/Put/seqid=0 2024-11-22T13:38:17,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741855_1031 (size=43081) 2024-11-22T13:38:17,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741855_1031 (size=43081) 2024-11-22T13:38:17,797 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/87ae5d1637384b3ca5cd55aca0d2fed4 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/87ae5d1637384b3ca5cd55aca0d2fed4 2024-11-22T13:38:17,802 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 91ec716bc6d7903852bfd20857daaa93/info of 91ec716bc6d7903852bfd20857daaa93 into 87ae5d1637384b3ca5cd55aca0d2fed4(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T13:38:17,802 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:17,802 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., storeName=91ec716bc6d7903852bfd20857daaa93/info, priority=12, startTime=1732282697748; duration=0sec 2024-11-22T13:38:17,802 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:17,802 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91ec716bc6d7903852bfd20857daaa93:info 2024-11-22T13:38:17,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:17,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:18,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/ns/066fe0e81b954096b76b474e15d8dc6a 2024-11-22T13:38:18,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/table/e17dc5768c5d4f9fbe91e83ca8c8d878 is 65, key is TestLogRolling-testLogRolling/table:state/1732282682669/Put/seqid=0 2024-11-22T13:38:18,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741856_1032 (size=5340) 2024-11-22T13:38:18,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741856_1032 (size=5340) 2024-11-22T13:38:18,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/table/e17dc5768c5d4f9fbe91e83ca8c8d878 2024-11-22T13:38:18,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/info/2c184c4e299e494d9b87fd16c1adb7da as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/info/2c184c4e299e494d9b87fd16c1adb7da 2024-11-22T13:38:18,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/info/2c184c4e299e494d9b87fd16c1adb7da, entries=30, sequenceid=17, filesize=9.6 K 2024-11-22T13:38:18,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/ns/066fe0e81b954096b76b474e15d8dc6a as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/ns/066fe0e81b954096b76b474e15d8dc6a 2024-11-22T13:38:18,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/ns/066fe0e81b954096b76b474e15d8dc6a, entries=2, sequenceid=17, filesize=5.0 K 2024-11-22T13:38:18,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/table/e17dc5768c5d4f9fbe91e83ca8c8d878 as 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/table/e17dc5768c5d4f9fbe91e83ca8c8d878 2024-11-22T13:38:18,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/table/e17dc5768c5d4f9fbe91e83ca8c8d878, entries=2, sequenceid=17, filesize=5.2 K 2024-11-22T13:38:18,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 526ms, sequenceid=17, compaction requested=false 2024-11-22T13:38:18,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T13:38:18,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:52082 deadline: 1732282708652, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. is not online on e025332d312f,41207,1732282681164 2024-11-22T13:38:18,682 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., hostname=e025332d312f,41207,1732282681164, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., hostname=e025332d312f,41207,1732282681164, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. is not online on e025332d312f,41207,1732282681164 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T13:38:18,683 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., hostname=e025332d312f,41207,1732282681164, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7. 
is not online on e025332d312f,41207,1732282681164 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T13:38:18,683 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732282682299.113e9e9fb4ce69e600badf85506cbae7., hostname=e025332d312f,41207,1732282681164, seqNum=2 from cache 2024-11-22T13:38:18,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:18,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:19,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:19,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:20,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:20,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:21,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:21,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:22,622 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T13:38:22,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,656 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,658 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T13:38:22,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:22,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:23,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:23,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:24,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:24,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:25,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:25,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:26,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:26,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:27,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:27,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:28,712 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., hostname=e025332d312f,41207,1732282681164, seqNum=130] 2024-11-22T13:38:28,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:28,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:38:28,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/270d1b9bb17e417aafc1bcada745294b is 1080, key is row0097/info:/1732282708713/Put/seqid=0 2024-11-22T13:38:28,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741857_1033 (size=12516) 2024-11-22T13:38:28,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741857_1033 (size=12516) 2024-11-22T13:38:28,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/270d1b9bb17e417aafc1bcada745294b 2024-11-22T13:38:28,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/270d1b9bb17e417aafc1bcada745294b as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/270d1b9bb17e417aafc1bcada745294b 2024-11-22T13:38:28,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/270d1b9bb17e417aafc1bcada745294b, entries=7, sequenceid=140, filesize=12.2 K 2024-11-22T13:38:28,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 91ec716bc6d7903852bfd20857daaa93 in 26ms, sequenceid=140, compaction requested=false 2024-11-22T13:38:28,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:28,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:28,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T13:38:28,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/ed1d8adc29bf4b6fb65ce9ff72534830 is 1080, key is row0104/info:/1732282708724/Put/seqid=0 2024-11-22T13:38:28,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741858_1034 (size=17906) 2024-11-22T13:38:28,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741858_1034 (size=17906) 2024-11-22T13:38:28,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/ed1d8adc29bf4b6fb65ce9ff72534830 2024-11-22T13:38:28,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/ed1d8adc29bf4b6fb65ce9ff72534830 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/ed1d8adc29bf4b6fb65ce9ff72534830 2024-11-22T13:38:28,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/ed1d8adc29bf4b6fb65ce9ff72534830, entries=12, sequenceid=155, filesize=17.5 K 2024-11-22T13:38:28,772 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 91ec716bc6d7903852bfd20857daaa93 in 23ms, sequenceid=155, compaction requested=true 2024-11-22T13:38:28,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:28,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91ec716bc6d7903852bfd20857daaa93:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:28,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:28,772 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:38:28,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:28,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T13:38:28,774 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:38:28,774 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] 
regionserver.HStore(1541): 91ec716bc6d7903852bfd20857daaa93/info is initiating minor compaction (all files) 2024-11-22T13:38:28,774 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91ec716bc6d7903852bfd20857daaa93/info in TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:28,774 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/87ae5d1637384b3ca5cd55aca0d2fed4, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/270d1b9bb17e417aafc1bcada745294b, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/ed1d8adc29bf4b6fb65ce9ff72534830] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp, totalSize=71.8 K 2024-11-22T13:38:28,774 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 87ae5d1637384b3ca5cd55aca0d2fed4, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732282694566 2024-11-22T13:38:28,775 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 270d1b9bb17e417aafc1bcada745294b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732282708713 2024-11-22T13:38:28,775 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed1d8adc29bf4b6fb65ce9ff72534830, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732282708724 2024-11-22T13:38:28,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/a357922d1c87402fa8477f88f620cd24 is 1080, key is row0116/info:/1732282708751/Put/seqid=0 2024-11-22T13:38:28,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741859_1035 (size=16828) 2024-11-22T13:38:28,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741859_1035 (size=16828) 2024-11-22T13:38:28,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/a357922d1c87402fa8477f88f620cd24 2024-11-22T13:38:28,786 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91ec716bc6d7903852bfd20857daaa93#info#compaction#74 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:28,786 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/c6f235ea66b34fbc8cea28b7cb6a12a6 is 1080, key is row0062/info:/1732282694566/Put/seqid=0 2024-11-22T13:38:28,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/a357922d1c87402fa8477f88f620cd24 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/a357922d1c87402fa8477f88f620cd24 2024-11-22T13:38:28,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/a357922d1c87402fa8477f88f620cd24, entries=11, sequenceid=169, filesize=16.4 K 2024-11-22T13:38:28,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741860_1036 (size=63733) 2024-11-22T13:38:28,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741860_1036 (size=63733) 2024-11-22T13:38:28,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for 91ec716bc6d7903852bfd20857daaa93 in 21ms, sequenceid=169, compaction requested=false 2024-11-22T13:38:28,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:28,798 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/c6f235ea66b34fbc8cea28b7cb6a12a6 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/c6f235ea66b34fbc8cea28b7cb6a12a6 2024-11-22T13:38:28,803 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91ec716bc6d7903852bfd20857daaa93/info of 91ec716bc6d7903852bfd20857daaa93 into c6f235ea66b34fbc8cea28b7cb6a12a6(size=62.2 K), total size for store is 78.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T13:38:28,803 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:28,803 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., storeName=91ec716bc6d7903852bfd20857daaa93/info, priority=13, startTime=1732282708772; duration=0sec 2024-11-22T13:38:28,803 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:28,803 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91ec716bc6d7903852bfd20857daaa93:info 2024-11-22T13:38:28,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:28,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:29,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:29,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:30,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:30,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:38:30,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/b75191ddea2f47a19b48f3c0a8e2b951 is 1080, key is row0127/info:/1732282708774/Put/seqid=0 2024-11-22T13:38:30,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741861_1037 (size=12516) 2024-11-22T13:38:30,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741861_1037 (size=12516) 2024-11-22T13:38:30,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/b75191ddea2f47a19b48f3c0a8e2b951 2024-11-22T13:38:30,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/b75191ddea2f47a19b48f3c0a8e2b951 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/b75191ddea2f47a19b48f3c0a8e2b951 2024-11-22T13:38:30,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/b75191ddea2f47a19b48f3c0a8e2b951, entries=7, sequenceid=180, filesize=12.2 K 2024-11-22T13:38:30,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 91ec716bc6d7903852bfd20857daaa93 in 25ms, sequenceid=180, compaction requested=true 2024-11-22T13:38:30,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:30,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91ec716bc6d7903852bfd20857daaa93:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:30,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:30,819 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:38:30,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:30,820 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T13:38:30,820 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93077 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:38:30,820 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 91ec716bc6d7903852bfd20857daaa93/info is initiating minor compaction (all files) 2024-11-22T13:38:30,821 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91ec716bc6d7903852bfd20857daaa93/info in TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:30,821 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/c6f235ea66b34fbc8cea28b7cb6a12a6, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/a357922d1c87402fa8477f88f620cd24, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/b75191ddea2f47a19b48f3c0a8e2b951] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp, totalSize=90.9 K 2024-11-22T13:38:30,821 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting c6f235ea66b34fbc8cea28b7cb6a12a6, keycount=54, bloomtype=ROW, size=62.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732282694566 2024-11-22T13:38:30,821 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting a357922d1c87402fa8477f88f620cd24, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732282708751 2024-11-22T13:38:30,822 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting b75191ddea2f47a19b48f3c0a8e2b951, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732282708774 2024-11-22T13:38:30,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/6783c5e118dd4af496a207e01b2bd3cb is 1080, key is row0134/info:/1732282710795/Put/seqid=0 2024-11-22T13:38:30,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741862_1038 (size=16828) 2024-11-22T13:38:30,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741862_1038 (size=16828) 2024-11-22T13:38:30,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=194 (bloomFilter=true), 
to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/6783c5e118dd4af496a207e01b2bd3cb 2024-11-22T13:38:30,834 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91ec716bc6d7903852bfd20857daaa93#info#compaction#77 average throughput is 36.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:30,835 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/1279a819f25f4459b74f46a7d8ee8722 is 1080, key is row0062/info:/1732282694566/Put/seqid=0 2024-11-22T13:38:30,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/6783c5e118dd4af496a207e01b2bd3cb as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/6783c5e118dd4af496a207e01b2bd3cb 2024-11-22T13:38:30,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741863_1039 (size=83312) 2024-11-22T13:38:30,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741863_1039 (size=83312) 2024-11-22T13:38:30,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/6783c5e118dd4af496a207e01b2bd3cb, entries=11, sequenceid=194, filesize=16.4 K 2024-11-22T13:38:30,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 91ec716bc6d7903852bfd20857daaa93 in 24ms, sequenceid=194, compaction requested=false 2024-11-22T13:38:30,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:30,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:30,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T13:38:30,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/9e1302a092f649fdb46ee7a8ae209431 is 1080, key is row0145/info:/1732282710821/Put/seqid=0 2024-11-22T13:38:30,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741864_1040 (size=17906) 2024-11-22T13:38:30,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 
is added to blk_1073741864_1040 (size=17906) 2024-11-22T13:38:30,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/9e1302a092f649fdb46ee7a8ae209431 2024-11-22T13:38:30,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/9e1302a092f649fdb46ee7a8ae209431 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/9e1302a092f649fdb46ee7a8ae209431 2024-11-22T13:38:30,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/9e1302a092f649fdb46ee7a8ae209431, entries=12, sequenceid=209, filesize=17.5 K 2024-11-22T13:38:30,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=4.20 KB/4304 for 91ec716bc6d7903852bfd20857daaa93 in 21ms, sequenceid=209, compaction requested=false 2024-11-22T13:38:30,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:30,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:30,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:30,954 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T13:38:31,255 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/1279a819f25f4459b74f46a7d8ee8722 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/1279a819f25f4459b74f46a7d8ee8722 2024-11-22T13:38:31,263 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91ec716bc6d7903852bfd20857daaa93/info of 91ec716bc6d7903852bfd20857daaa93 into 1279a819f25f4459b74f46a7d8ee8722(size=81.4 K), total size for store is 115.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T13:38:31,263 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:31,263 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., storeName=91ec716bc6d7903852bfd20857daaa93/info, priority=13, startTime=1732282710819; duration=0sec 2024-11-22T13:38:31,263 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:31,263 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91ec716bc6d7903852bfd20857daaa93:info 2024-11-22T13:38:31,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:31,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:32,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:32,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:38:32,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c is 1080, key is row0157/info:/1732282710848/Put/seqid=0 2024-11-22T13:38:32,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741865_1041 (size=12516) 2024-11-22T13:38:32,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741865_1041 (size=12516) 2024-11-22T13:38:32,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=220 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c 2024-11-22T13:38:32,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:32,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:32,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c 2024-11-22T13:38:32,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c, entries=7, sequenceid=220, filesize=12.2 K 2024-11-22T13:38:32,910 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 91ec716bc6d7903852bfd20857daaa93 in 43ms, sequenceid=220, compaction requested=true 2024-11-22T13:38:32,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:32,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91ec716bc6d7903852bfd20857daaa93:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:32,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:32,910 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T13:38:32,912 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 130562 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T13:38:32,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:32,912 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 91ec716bc6d7903852bfd20857daaa93/info is initiating minor compaction (all files) 2024-11-22T13:38:32,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-22T13:38:32,912 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91ec716bc6d7903852bfd20857daaa93/info in TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 
2024-11-22T13:38:32,912 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/1279a819f25f4459b74f46a7d8ee8722, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/6783c5e118dd4af496a207e01b2bd3cb, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/9e1302a092f649fdb46ee7a8ae209431, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp, totalSize=127.5 K 2024-11-22T13:38:32,913 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1279a819f25f4459b74f46a7d8ee8722, keycount=72, bloomtype=ROW, size=81.4 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1732282694566 2024-11-22T13:38:32,913 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6783c5e118dd4af496a207e01b2bd3cb, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732282710795 2024-11-22T13:38:32,913 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9e1302a092f649fdb46ee7a8ae209431, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732282710821 2024-11-22T13:38:32,914 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9e26c2d9ed74e2fb49cb0f74a8ddd9c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732282710848 2024-11-22T13:38:32,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/3d2085790d9d413993e72f65836fe492 is 1080, key is row0164/info:/1732282712869/Put/seqid=0 2024-11-22T13:38:32,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741866_1042 (size=20078) 2024-11-22T13:38:32,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741866_1042 (size=20078) 2024-11-22T13:38:32,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/3d2085790d9d413993e72f65836fe492 2024-11-22T13:38:32,930 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91ec716bc6d7903852bfd20857daaa93#info#compaction#81 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:32,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/3d2085790d9d413993e72f65836fe492 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/3d2085790d9d413993e72f65836fe492 2024-11-22T13:38:32,930 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/55e161683e144251a22986c7eeebc8aa is 1080, key is row0062/info:/1732282694566/Put/seqid=0 2024-11-22T13:38:32,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/3d2085790d9d413993e72f65836fe492, entries=14, sequenceid=237, filesize=19.6 K 2024-11-22T13:38:32,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741867_1043 (size=115796) 2024-11-22T13:38:32,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741867_1043 (size=115796) 2024-11-22T13:38:32,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 91ec716bc6d7903852bfd20857daaa93 in 26ms, sequenceid=237, compaction requested=false 2024-11-22T13:38:32,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:32,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:32,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T13:38:32,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/cabf0f58560b44d5b3f104a6c5294adc is 1080, key is row0178/info:/1732282712913/Put/seqid=0 2024-11-22T13:38:32,943 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/55e161683e144251a22986c7eeebc8aa as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/55e161683e144251a22986c7eeebc8aa 2024-11-22T13:38:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741868_1044 (size=17906) 2024-11-22T13:38:32,948 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741868_1044 (size=17906) 2024-11-22T13:38:32,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/cabf0f58560b44d5b3f104a6c5294adc 2024-11-22T13:38:32,950 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 91ec716bc6d7903852bfd20857daaa93/info of 91ec716bc6d7903852bfd20857daaa93 into 55e161683e144251a22986c7eeebc8aa(size=113.1 K), total size for store is 132.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T13:38:32,950 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:32,950 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., storeName=91ec716bc6d7903852bfd20857daaa93/info, priority=12, startTime=1732282712910; duration=0sec 2024-11-22T13:38:32,950 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:32,950 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91ec716bc6d7903852bfd20857daaa93:info 2024-11-22T13:38:32,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/cabf0f58560b44d5b3f104a6c5294adc as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cabf0f58560b44d5b3f104a6c5294adc 2024-11-22T13:38:32,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cabf0f58560b44d5b3f104a6c5294adc, entries=12, sequenceid=252, filesize=17.5 K 2024-11-22T13:38:32,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=3.15 KB/3228 for 91ec716bc6d7903852bfd20857daaa93 in 22ms, sequenceid=252, compaction requested=true 2024-11-22T13:38:32,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:32,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91ec716bc6d7903852bfd20857daaa93:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:32,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:32,960 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:38:32,961 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 153780 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:38:32,961 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 91ec716bc6d7903852bfd20857daaa93/info is initiating minor compaction (all files) 2024-11-22T13:38:32,961 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91ec716bc6d7903852bfd20857daaa93/info in TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:32,961 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/55e161683e144251a22986c7eeebc8aa, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/3d2085790d9d413993e72f65836fe492, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cabf0f58560b44d5b3f104a6c5294adc] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp, totalSize=150.2 K 2024-11-22T13:38:32,962 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 55e161683e144251a22986c7eeebc8aa, keycount=102, bloomtype=ROW, size=113.1 K, encoding=NONE, compression=NONE, seqNum=220, earliestPutTs=1732282694566 2024-11-22T13:38:32,962 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3d2085790d9d413993e72f65836fe492, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732282712869 2024-11-22T13:38:32,962 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting cabf0f58560b44d5b3f104a6c5294adc, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732282712913 2024-11-22T13:38:32,973 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91ec716bc6d7903852bfd20857daaa93#info#compaction#83 average throughput is 65.67 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:32,973 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/89995c13a72443b6b15d596cf06753f2 is 1080, key is row0062/info:/1732282694566/Put/seqid=0 2024-11-22T13:38:32,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741869_1045 (size=144131) 2024-11-22T13:38:32,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741869_1045 (size=144131) 2024-11-22T13:38:32,983 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/89995c13a72443b6b15d596cf06753f2 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/89995c13a72443b6b15d596cf06753f2 2024-11-22T13:38:32,989 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91ec716bc6d7903852bfd20857daaa93/info of 91ec716bc6d7903852bfd20857daaa93 into 89995c13a72443b6b15d596cf06753f2(size=140.8 K), total size for store is 140.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T13:38:32,989 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:32,989 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., storeName=91ec716bc6d7903852bfd20857daaa93/info, priority=13, startTime=1732282712960; duration=0sec 2024-11-22T13:38:32,990 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:32,990 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91ec716bc6d7903852bfd20857daaa93:info 2024-11-22T13:38:33,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:33,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:34,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:34,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:34,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:34,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:38:34,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/5185efc2fc774d6e886cb239aa1c903b is 1080, key is row0190/info:/1732282712940/Put/seqid=0 2024-11-22T13:38:34,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741870_1046 (size=12521) 2024-11-22T13:38:34,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741870_1046 (size=12521) 2024-11-22T13:38:34,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=264 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/5185efc2fc774d6e886cb239aa1c903b 2024-11-22T13:38:34,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/5185efc2fc774d6e886cb239aa1c903b as 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/5185efc2fc774d6e886cb239aa1c903b 2024-11-22T13:38:34,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/5185efc2fc774d6e886cb239aa1c903b, entries=7, sequenceid=264, filesize=12.2 K 2024-11-22T13:38:34,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 91ec716bc6d7903852bfd20857daaa93 in 22ms, sequenceid=264, compaction requested=false 2024-11-22T13:38:34,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:34,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:34,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T13:38:34,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/4e110ac91c294302bda11bf4e9a79cf2 is 1080, key is row0197/info:/1732282714964/Put/seqid=0 2024-11-22T13:38:34,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741871_1047 (size=16839) 2024-11-22T13:38:34,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741871_1047 (size=16839) 2024-11-22T13:38:34,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/4e110ac91c294302bda11bf4e9a79cf2 2024-11-22T13:38:35,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/4e110ac91c294302bda11bf4e9a79cf2 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/4e110ac91c294302bda11bf4e9a79cf2 2024-11-22T13:38:35,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/4e110ac91c294302bda11bf4e9a79cf2, entries=11, sequenceid=278, filesize=16.4 K 2024-11-22T13:38:35,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 91ec716bc6d7903852bfd20857daaa93 in 20ms, sequenceid=278, compaction requested=true 2024-11-22T13:38:35,005 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:35,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91ec716bc6d7903852bfd20857daaa93:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:35,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:35,006 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:38:35,007 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 173491 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:38:35,007 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 91ec716bc6d7903852bfd20857daaa93/info is initiating minor compaction (all files) 2024-11-22T13:38:35,007 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91ec716bc6d7903852bfd20857daaa93/info in TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:35,007 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/89995c13a72443b6b15d596cf06753f2, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/5185efc2fc774d6e886cb239aa1c903b, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/4e110ac91c294302bda11bf4e9a79cf2] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp, totalSize=169.4 K 2024-11-22T13:38:35,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:35,007 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 89995c13a72443b6b15d596cf06753f2, keycount=128, bloomtype=ROW, size=140.8 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732282694566 2024-11-22T13:38:35,007 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T13:38:35,007 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5185efc2fc774d6e886cb239aa1c903b, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=264, earliestPutTs=1732282712940 2024-11-22T13:38:35,008 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4e110ac91c294302bda11bf4e9a79cf2, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732282714964 2024-11-22T13:38:35,011 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/31045a9de14d4318bc158119d1eace0f is 1080, key is row0208/info:/1732282714986/Put/seqid=0 2024-11-22T13:38:35,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741872_1048 (size=17918) 2024-11-22T13:38:35,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741872_1048 (size=17918) 2024-11-22T13:38:35,020 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91ec716bc6d7903852bfd20857daaa93#info#compaction#87 average throughput is 74.91 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:35,021 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/88698ae104444612995b4354c5ce1450 is 1080, key is row0062/info:/1732282694566/Put/seqid=0 2024-11-22T13:38:35,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741873_1049 (size=163657) 2024-11-22T13:38:35,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741873_1049 (size=163657) 2024-11-22T13:38:35,028 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/88698ae104444612995b4354c5ce1450 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/88698ae104444612995b4354c5ce1450 2024-11-22T13:38:35,033 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91ec716bc6d7903852bfd20857daaa93/info of 91ec716bc6d7903852bfd20857daaa93 into 88698ae104444612995b4354c5ce1450(size=159.8 K), total size for store is 159.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
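The cycle recorded above (memstore flush into a .tmp HFile, commit into 91ec716bc6d7903852bfd20857daaa93/info, then an ExploringCompactionPolicy minor compaction once three store files are eligible, throttled at "50.00 MB/second") is governed by a handful of store-level settings. The sketch below is a hypothetical, non-authoritative rendering of those knobs via the HBase Configuration API: the property names are standard HBase keys, the class name CompactionTuning is invented for illustration, and the values are the defaults that appear to match the figures in this log (3 eligible files, 16 blocking store files, a 50 MB/second throttle floor reported by PressureAwareThroughputController).

```java
// Hypothetical illustration only: the configuration knobs that appear to drive
// the flush/compaction pattern in the log above. Not taken from the test code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {                           // invented class name
  public static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // "Selecting compaction from 3 store files ... 3 eligible"
    conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files per compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in SortedCompactionPolicy(75)
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // ratio used when exploring permutations
    // Assumed mapping: the "total limit is 50.00 MB/second" lines look like the
    // lower bound of the pressure-aware compaction throughput controller.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    return conf;
  }
}
```

With settings like these, every flush that brings the info store back to three files re-queues a small system compaction, which is exactly the CompactSplit "Add compact mark ... Small Compaction requested" pattern that repeats through this section.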
2024-11-22T13:38:35,033 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:35,033 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., storeName=91ec716bc6d7903852bfd20857daaa93/info, priority=13, startTime=1732282715005; duration=0sec 2024-11-22T13:38:35,033 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:35,033 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91ec716bc6d7903852bfd20857daaa93:info 2024-11-22T13:38:35,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/31045a9de14d4318bc158119d1eace0f 2024-11-22T13:38:35,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/31045a9de14d4318bc158119d1eace0f as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/31045a9de14d4318bc158119d1eace0f 2024-11-22T13:38:35,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/31045a9de14d4318bc158119d1eace0f, entries=12, sequenceid=293, filesize=17.5 K 2024-11-22T13:38:35,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for 91ec716bc6d7903852bfd20857daaa93 in 426ms, sequenceid=293, compaction requested=false 2024-11-22T13:38:35,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:35,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:35,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:36,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:36,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:37,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:37,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T13:38:37,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/58124adb8c29410f9bfe17b0e4cb2440 is 1080, key is row0220/info:/1732282715008/Put/seqid=0 2024-11-22T13:38:37,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741874_1050 (size=12523) 2024-11-22T13:38:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741874_1050 (size=12523) 2024-11-22T13:38:37,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/58124adb8c29410f9bfe17b0e4cb2440 2024-11-22T13:38:37,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/58124adb8c29410f9bfe17b0e4cb2440 as 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/58124adb8c29410f9bfe17b0e4cb2440 2024-11-22T13:38:37,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/58124adb8c29410f9bfe17b0e4cb2440, entries=7, sequenceid=304, filesize=12.2 K 2024-11-22T13:38:37,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 91ec716bc6d7903852bfd20857daaa93 in 26ms, sequenceid=304, compaction requested=true 2024-11-22T13:38:37,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:37,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91ec716bc6d7903852bfd20857daaa93:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:37,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:37,050 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:38:37,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:37,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T13:38:37,051 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 194098 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:38:37,051 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 91ec716bc6d7903852bfd20857daaa93/info is initiating minor compaction (all files) 2024-11-22T13:38:37,051 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91ec716bc6d7903852bfd20857daaa93/info in TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 
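Interleaved with these compactions, the Close-WAL-Writer-0 thread keeps logging "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" (see the traces above at 13:38:33-13:38:36 and again after 13:38:37,886) while trying to recover the lease on two WAL files under hdfs://localhost:46035. The traces show RecoverLeaseFSUtils reaching DFSClient.isFileClosed through reflection; the snippet below is a minimal, hypothetical sketch of that probe using the public DistributedFileSystem API directly. The class name WalLeaseProbe and the fixed retry bound are assumptions, not HBase's implementation. The "Filesystem closed" cause means the DFSClient behind the FileSystem handle has already been shut down, so each retry fails identically until the recovery loop gives up.

```java
// Minimal sketch (not the HBase implementation) of a WAL lease-recovery probe:
// ask the NameNode to recover the lease, then poll isFileClosed() until the
// last block is finalized. isFileClosed() is the call that throws
// "Filesystem closed" in the warnings above once the client is shut down.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseProbe {                       // invented class name
  public static boolean recover(URI hdfsUri, String walPath) throws Exception {
    FileSystem fs = FileSystem.get(hdfsUri, new Configuration());
    if (!(fs instanceof DistributedFileSystem)) {
      return true;                                 // non-HDFS filesystem: nothing to recover
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path wal = new Path(walPath);
    boolean recovered = dfs.recoverLease(wal);     // may return false while blocks are still open
    for (int i = 0; !recovered && i < 10; i++) {   // bounded poll; HBase uses longer, configurable timeouts
      Thread.sleep(1000L);
      recovered = dfs.isFileClosed(wal);           // fails with "Filesystem closed" if the client was shut down
    }
    return recovered;
  }
}
```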
2024-11-22T13:38:37,051 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/88698ae104444612995b4354c5ce1450, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/31045a9de14d4318bc158119d1eace0f, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/58124adb8c29410f9bfe17b0e4cb2440] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp, totalSize=189.5 K 2024-11-22T13:38:37,051 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 88698ae104444612995b4354c5ce1450, keycount=146, bloomtype=ROW, size=159.8 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1732282694566 2024-11-22T13:38:37,052 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 31045a9de14d4318bc158119d1eace0f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732282714986 2024-11-22T13:38:37,052 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 58124adb8c29410f9bfe17b0e4cb2440, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732282715008 2024-11-22T13:38:37,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/af330ea8d7e445d49cdf27b2b61c582a is 1080, key is row0227/info:/1732282717024/Put/seqid=0 2024-11-22T13:38:37,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741875_1051 (size=19013) 2024-11-22T13:38:37,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741875_1051 (size=19013) 2024-11-22T13:38:37,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/af330ea8d7e445d49cdf27b2b61c582a 2024-11-22T13:38:37,065 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91ec716bc6d7903852bfd20857daaa93#info#compaction#90 average throughput is 56.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:37,066 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/cd2b943fc79442e1998d5befe8946d15 is 1080, key is row0062/info:/1732282694566/Put/seqid=0 2024-11-22T13:38:37,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/af330ea8d7e445d49cdf27b2b61c582a as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/af330ea8d7e445d49cdf27b2b61c582a 2024-11-22T13:38:37,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741876_1052 (size=184264) 2024-11-22T13:38:37,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741876_1052 (size=184264) 2024-11-22T13:38:37,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/af330ea8d7e445d49cdf27b2b61c582a, entries=13, sequenceid=320, filesize=18.6 K 2024-11-22T13:38:37,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 91ec716bc6d7903852bfd20857daaa93 in 23ms, sequenceid=320, compaction requested=false 2024-11-22T13:38:37,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:37,075 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/cd2b943fc79442e1998d5befe8946d15 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cd2b943fc79442e1998d5befe8946d15 2024-11-22T13:38:37,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41207 {}] regionserver.HRegion(8855): Flush requested on 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:37,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T13:38:37,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/06f24a0eafde498c8f50ec951c89dc59 is 1080, key is row0240/info:/1732282717052/Put/seqid=0 2024-11-22T13:38:37,081 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91ec716bc6d7903852bfd20857daaa93/info of 91ec716bc6d7903852bfd20857daaa93 into 
cd2b943fc79442e1998d5befe8946d15(size=179.9 K), total size for store is 198.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T13:38:37,081 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:37,081 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., storeName=91ec716bc6d7903852bfd20857daaa93/info, priority=13, startTime=1732282717049; duration=0sec 2024-11-22T13:38:37,081 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:37,081 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91ec716bc6d7903852bfd20857daaa93:info 2024-11-22T13:38:37,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741877_1053 (size=17918) 2024-11-22T13:38:37,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741877_1053 (size=17918) 2024-11-22T13:38:37,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/06f24a0eafde498c8f50ec951c89dc59 2024-11-22T13:38:37,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/06f24a0eafde498c8f50ec951c89dc59 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/06f24a0eafde498c8f50ec951c89dc59 2024-11-22T13:38:37,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/06f24a0eafde498c8f50ec951c89dc59, entries=12, sequenceid=335, filesize=17.5 K 2024-11-22T13:38:37,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for 91ec716bc6d7903852bfd20857daaa93 in 17ms, sequenceid=335, compaction requested=true 2024-11-22T13:38:37,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:37,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 91ec716bc6d7903852bfd20857daaa93:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T13:38:37,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:37,092 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T13:38:37,094 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 221195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T13:38:37,094 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1541): 91ec716bc6d7903852bfd20857daaa93/info is initiating minor compaction (all files) 2024-11-22T13:38:37,094 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 91ec716bc6d7903852bfd20857daaa93/info in TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:37,094 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cd2b943fc79442e1998d5befe8946d15, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/af330ea8d7e445d49cdf27b2b61c582a, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/06f24a0eafde498c8f50ec951c89dc59] into tmpdir=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp, totalSize=216.0 K 2024-11-22T13:38:37,094 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd2b943fc79442e1998d5befe8946d15, keycount=165, bloomtype=ROW, size=179.9 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1732282694566 2024-11-22T13:38:37,094 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting af330ea8d7e445d49cdf27b2b61c582a, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1732282717024 2024-11-22T13:38:37,095 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] compactions.Compactor(225): Compacting 06f24a0eafde498c8f50ec951c89dc59, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732282717052 2024-11-22T13:38:37,105 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 91ec716bc6d7903852bfd20857daaa93#info#compaction#92 average throughput is 64.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T13:38:37,106 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/b8d48277aa7149c0b0afcf0b1b2d8d98 is 1080, key is row0062/info:/1732282694566/Put/seqid=0 2024-11-22T13:38:37,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741878_1054 (size=211418) 2024-11-22T13:38:37,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741878_1054 (size=211418) 2024-11-22T13:38:37,112 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/b8d48277aa7149c0b0afcf0b1b2d8d98 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/b8d48277aa7149c0b0afcf0b1b2d8d98 2024-11-22T13:38:37,117 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 91ec716bc6d7903852bfd20857daaa93/info of 91ec716bc6d7903852bfd20857daaa93 into b8d48277aa7149c0b0afcf0b1b2d8d98(size=206.5 K), total size for store is 206.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T13:38:37,117 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:37,117 INFO [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93., storeName=91ec716bc6d7903852bfd20857daaa93/info, priority=13, startTime=1732282717092; duration=0sec 2024-11-22T13:38:37,117 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T13:38:37,117 DEBUG [RS:0;e025332d312f:41207-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 91ec716bc6d7903852bfd20857daaa93:info 2024-11-22T13:38:37,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:37,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:38,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:38,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:39,085 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-22T13:38:39,086 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41207%2C1732282681164.1732282719086 2024-11-22T13:38:39,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,099 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,099 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,099 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,099 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,099 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282681789 with entries=319, filesize=310.85 KB; new WAL /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282719086 2024-11-22T13:38:39,101 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43003:43003),(127.0.0.1/127.0.0.1:43259:43259)] 2024-11-22T13:38:39,101 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282681789 is not closed yet, will try archiving it next time 2024-11-22T13:38:39,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741833_1009 (size=318314) 2024-11-22T13:38:39,102 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741833_1009 (size=318314) 2024-11-22T13:38:39,107 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 4cad7f0310a0b729b7e6f35cdf635d8f: 2024-11-22T13:38:39,107 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-22T13:38:39,113 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/info/8fe6bd837a1444aebad0efa3b8be6ad1 is 193, key is TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93./info:regioninfo/1732282697752/Put/seqid=0 2024-11-22T13:38:39,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741880_1056 (size=6223) 2024-11-22T13:38:39,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741880_1056 (size=6223) 2024-11-22T13:38:39,117 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/info/8fe6bd837a1444aebad0efa3b8be6ad1 2024-11-22T13:38:39,123 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/.tmp/info/8fe6bd837a1444aebad0efa3b8be6ad1 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/info/8fe6bd837a1444aebad0efa3b8be6ad1 2024-11-22T13:38:39,128 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/info/8fe6bd837a1444aebad0efa3b8be6ad1, entries=5, sequenceid=21, filesize=6.1 K 2024-11-22T13:38:39,129 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=21, compaction requested=false 2024-11-22T13:38:39,129 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T13:38:39,129 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 91ec716bc6d7903852bfd20857daaa93 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-22T13:38:39,133 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/70c7fc1333134d0285208e97b53fa749 is 1080, key is row0252/info:/1732282717076/Put/seqid=0 2024-11-22T13:38:39,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741881_1057 (size=10357) 2024-11-22T13:38:39,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741881_1057 (size=10357) 2024-11-22T13:38:39,141 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at 
sequenceid=345 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/70c7fc1333134d0285208e97b53fa749 2024-11-22T13:38:39,147 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/.tmp/info/70c7fc1333134d0285208e97b53fa749 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/70c7fc1333134d0285208e97b53fa749 2024-11-22T13:38:39,152 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/70c7fc1333134d0285208e97b53fa749, entries=5, sequenceid=345, filesize=10.1 K 2024-11-22T13:38:39,153 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 91ec716bc6d7903852bfd20857daaa93 in 24ms, sequenceid=345, compaction requested=false 2024-11-22T13:38:39,153 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 91ec716bc6d7903852bfd20857daaa93: 2024-11-22T13:38:39,153 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41207%2C1732282681164.1732282719153 2024-11-22T13:38:39,158 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,158 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,158 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,158 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,158 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,158 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282719086 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282719153 2024-11-22T13:38:39,159 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43003:43003),(127.0.0.1/127.0.0.1:43259:43259)] 2024-11-22T13:38:39,159 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282719086 is not closed yet, will try archiving it next time 2024-11-22T13:38:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741879_1055 (size=731) 2024-11-22T13:38:39,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741879_1055 (size=731) 2024-11-22T13:38:39,160 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T13:38:39,161 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 
2024-11-22T13:38:39,161 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T13:38:39,161 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:38:39,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:39,161 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:39,161 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end 
points refresher loop exited. 2024-11-22T13:38:39,161 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T13:38:39,161 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1979131131, stopped=false 2024-11-22T13:38:39,161 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e025332d312f,35245,1732282680999 2024-11-22T13:38:39,162 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282681789 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/oldWALs/e025332d312f%2C41207%2C1732282681164.1732282681789 2024-11-22T13:38:39,163 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/WALs/e025332d312f,41207,1732282681164/e025332d312f%2C41207%2C1732282681164.1732282719086 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/oldWALs/e025332d312f%2C41207%2C1732282681164.1732282719086 2024-11-22T13:38:39,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:38:39,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:38:39,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:39,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:39,260 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:38:39,260 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T13:38:39,260 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:38:39,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:39,260 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'e025332d312f,41207,1732282681164' ***** 2024-11-22T13:38:39,260 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T13:38:39,261 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:38:39,261 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T13:38:39,261 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(3091): Received CLOSE for 4cad7f0310a0b729b7e6f35cdf635d8f 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(3091): Received CLOSE for 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(959): stopping server e025332d312f,41207,1732282681164 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:38:39,261 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4cad7f0310a0b729b7e6f35cdf635d8f, disabling compactions & flushes 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e025332d312f:41207. 2024-11-22T13:38:39,261 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 
2024-11-22T13:38:39,261 DEBUG [RS:0;e025332d312f:41207 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:38:39,261 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 2024-11-22T13:38:39,261 DEBUG [RS:0;e025332d312f:41207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:39,261 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. after waiting 0 ms 2024-11-22T13:38:39,261 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T13:38:39,261 INFO [RS:0;e025332d312f:41207 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T13:38:39,262 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T13:38:39,262 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-22T13:38:39,262 DEBUG [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1325): Online Regions={4cad7f0310a0b729b7e6f35cdf635d8f=TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f., 1588230740=hbase:meta,,1.1588230740, 91ec716bc6d7903852bfd20857daaa93=TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.} 2024-11-22T13:38:39,262 DEBUG [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4cad7f0310a0b729b7e6f35cdf635d8f, 91ec716bc6d7903852bfd20857daaa93 2024-11-22T13:38:39,262 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:38:39,262 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:38:39,262 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:38:39,262 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:38:39,262 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:38:39,262 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7->hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313-bottom] to archive 2024-11-22T13:38:39,263 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T13:38:39,264 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:39,265 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=e025332d312f:35245 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T13:38:39,265 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-22T13:38:39,266 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-22T13:38:39,266 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:38:39,266 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:38:39,266 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282719262Running coprocessor pre-close hooks at 1732282719262Disabling compacts and flushes for region at 1732282719262Disabling writes for close at 1732282719262Writing region close event to WAL at 1732282719263 (+1 ms)Running coprocessor post-close hooks at 1732282719266 (+3 ms)Closed at 1732282719266 2024-11-22T13:38:39,266 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T13:38:39,268 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/4cad7f0310a0b729b7e6f35cdf635d8f/recovered.edits/134.seqid, newMaxSeqId=134, maxSeqId=129 2024-11-22T13:38:39,269 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 2024-11-22T13:38:39,269 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4cad7f0310a0b729b7e6f35cdf635d8f: Waiting for close lock at 1732282719261Running coprocessor pre-close hooks at 1732282719261Disabling compacts and flushes for region at 1732282719261Disabling writes for close at 1732282719261Writing region close event to WAL at 1732282719265 (+4 ms)Running coprocessor post-close hooks at 1732282719269 (+4 ms)Closed at 1732282719269 2024-11-22T13:38:39,269 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732282696663.4cad7f0310a0b729b7e6f35cdf635d8f. 
2024-11-22T13:38:39,269 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 91ec716bc6d7903852bfd20857daaa93, disabling compactions & flushes 2024-11-22T13:38:39,269 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:39,269 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:39,269 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. after waiting 0 ms 2024-11-22T13:38:39,269 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:39,270 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7->hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/113e9e9fb4ce69e600badf85506cbae7/info/589ce21507ca42f8af4dd026705da313-top, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-6e8d6afd83b747a582023f5fa3ddf751, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/87ae5d1637384b3ca5cd55aca0d2fed4, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-838990b953be497aa7ff7f09407854b0, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/270d1b9bb17e417aafc1bcada745294b, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/c6f235ea66b34fbc8cea28b7cb6a12a6, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/ed1d8adc29bf4b6fb65ce9ff72534830, 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/a357922d1c87402fa8477f88f620cd24, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/1279a819f25f4459b74f46a7d8ee8722, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/b75191ddea2f47a19b48f3c0a8e2b951, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/6783c5e118dd4af496a207e01b2bd3cb, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/9e1302a092f649fdb46ee7a8ae209431, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/55e161683e144251a22986c7eeebc8aa, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/3d2085790d9d413993e72f65836fe492, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/89995c13a72443b6b15d596cf06753f2, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cabf0f58560b44d5b3f104a6c5294adc, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/5185efc2fc774d6e886cb239aa1c903b, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/88698ae104444612995b4354c5ce1450, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/4e110ac91c294302bda11bf4e9a79cf2, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/31045a9de14d4318bc158119d1eace0f, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cd2b943fc79442e1998d5befe8946d15, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/58124adb8c29410f9bfe17b0e4cb2440, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/af330ea8d7e445d49cdf27b2b61c582a, 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/06f24a0eafde498c8f50ec951c89dc59] to archive 2024-11-22T13:38:39,271 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T13:38:39,272 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/589ce21507ca42f8af4dd026705da313.113e9e9fb4ce69e600badf85506cbae7 2024-11-22T13:38:39,273 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-28ece6d8b8dd458a90ac58143568f0fb 2024-11-22T13:38:39,274 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-6e8d6afd83b747a582023f5fa3ddf751 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-6e8d6afd83b747a582023f5fa3ddf751 2024-11-22T13:38:39,275 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/87ae5d1637384b3ca5cd55aca0d2fed4 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/87ae5d1637384b3ca5cd55aca0d2fed4 2024-11-22T13:38:39,276 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-838990b953be497aa7ff7f09407854b0 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/TestLogRolling-testLogRolling=113e9e9fb4ce69e600badf85506cbae7-838990b953be497aa7ff7f09407854b0 2024-11-22T13:38:39,277 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/270d1b9bb17e417aafc1bcada745294b to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/270d1b9bb17e417aafc1bcada745294b 2024-11-22T13:38:39,278 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/c6f235ea66b34fbc8cea28b7cb6a12a6 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/c6f235ea66b34fbc8cea28b7cb6a12a6 2024-11-22T13:38:39,279 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/ed1d8adc29bf4b6fb65ce9ff72534830 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/ed1d8adc29bf4b6fb65ce9ff72534830 2024-11-22T13:38:39,280 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/a357922d1c87402fa8477f88f620cd24 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/a357922d1c87402fa8477f88f620cd24 2024-11-22T13:38:39,281 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/1279a819f25f4459b74f46a7d8ee8722 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/1279a819f25f4459b74f46a7d8ee8722 2024-11-22T13:38:39,281 
DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/b75191ddea2f47a19b48f3c0a8e2b951 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/b75191ddea2f47a19b48f3c0a8e2b951 2024-11-22T13:38:39,282 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/6783c5e118dd4af496a207e01b2bd3cb to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/6783c5e118dd4af496a207e01b2bd3cb 2024-11-22T13:38:39,283 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/9e1302a092f649fdb46ee7a8ae209431 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/9e1302a092f649fdb46ee7a8ae209431 2024-11-22T13:38:39,284 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/55e161683e144251a22986c7eeebc8aa to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/55e161683e144251a22986c7eeebc8aa 2024-11-22T13:38:39,285 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/d9e26c2d9ed74e2fb49cb0f74a8ddd9c 2024-11-22T13:38:39,285 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/3d2085790d9d413993e72f65836fe492 to 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/3d2085790d9d413993e72f65836fe492 2024-11-22T13:38:39,286 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/89995c13a72443b6b15d596cf06753f2 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/89995c13a72443b6b15d596cf06753f2 2024-11-22T13:38:39,287 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cabf0f58560b44d5b3f104a6c5294adc to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cabf0f58560b44d5b3f104a6c5294adc 2024-11-22T13:38:39,288 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/5185efc2fc774d6e886cb239aa1c903b to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/5185efc2fc774d6e886cb239aa1c903b 2024-11-22T13:38:39,289 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/88698ae104444612995b4354c5ce1450 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/88698ae104444612995b4354c5ce1450 2024-11-22T13:38:39,289 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/4e110ac91c294302bda11bf4e9a79cf2 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/4e110ac91c294302bda11bf4e9a79cf2 2024-11-22T13:38:39,290 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/31045a9de14d4318bc158119d1eace0f to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/31045a9de14d4318bc158119d1eace0f 2024-11-22T13:38:39,291 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cd2b943fc79442e1998d5befe8946d15 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/cd2b943fc79442e1998d5befe8946d15 2024-11-22T13:38:39,292 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/58124adb8c29410f9bfe17b0e4cb2440 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/58124adb8c29410f9bfe17b0e4cb2440 2024-11-22T13:38:39,293 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/af330ea8d7e445d49cdf27b2b61c582a to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/af330ea8d7e445d49cdf27b2b61c582a 2024-11-22T13:38:39,293 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/06f24a0eafde498c8f50ec951c89dc59 to hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/archive/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/info/06f24a0eafde498c8f50ec951c89dc59 2024-11-22T13:38:39,294 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [87ae5d1637384b3ca5cd55aca0d2fed4=43081, 270d1b9bb17e417aafc1bcada745294b=12516, c6f235ea66b34fbc8cea28b7cb6a12a6=63733, ed1d8adc29bf4b6fb65ce9ff72534830=17906, a357922d1c87402fa8477f88f620cd24=16828, 1279a819f25f4459b74f46a7d8ee8722=83312, b75191ddea2f47a19b48f3c0a8e2b951=12516, 6783c5e118dd4af496a207e01b2bd3cb=16828, 9e1302a092f649fdb46ee7a8ae209431=17906, 55e161683e144251a22986c7eeebc8aa=115796, d9e26c2d9ed74e2fb49cb0f74a8ddd9c=12516, 3d2085790d9d413993e72f65836fe492=20078, 89995c13a72443b6b15d596cf06753f2=144131, 
cabf0f58560b44d5b3f104a6c5294adc=17906, 5185efc2fc774d6e886cb239aa1c903b=12521, 88698ae104444612995b4354c5ce1450=163657, 4e110ac91c294302bda11bf4e9a79cf2=16839, 31045a9de14d4318bc158119d1eace0f=17918, cd2b943fc79442e1998d5befe8946d15=184264, 58124adb8c29410f9bfe17b0e4cb2440=12523, af330ea8d7e445d49cdf27b2b61c582a=19013, 06f24a0eafde498c8f50ec951c89dc59=17918] 2024-11-22T13:38:39,297 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/data/default/TestLogRolling-testLogRolling/91ec716bc6d7903852bfd20857daaa93/recovered.edits/348.seqid, newMaxSeqId=348, maxSeqId=129 2024-11-22T13:38:39,297 INFO [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:39,297 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 91ec716bc6d7903852bfd20857daaa93: Waiting for close lock at 1732282719269Running coprocessor pre-close hooks at 1732282719269Disabling compacts and flushes for region at 1732282719269Disabling writes for close at 1732282719269Writing region close event to WAL at 1732282719294 (+25 ms)Running coprocessor post-close hooks at 1732282719297 (+3 ms)Closed at 1732282719297 2024-11-22T13:38:39,297 DEBUG [RS_CLOSE_REGION-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732282696663.91ec716bc6d7903852bfd20857daaa93. 2024-11-22T13:38:39,462 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(976): stopping server e025332d312f,41207,1732282681164; all regions closed. 
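Editor's note, illustrative only: the HFileArchiver entries above record each store file being moved from the region's data directory to the same relative path under archive/ before the region closes. The following is a minimal plain-Java sketch of that move-to-mirrored-archive pattern, not HBase's HFileArchiver code; the directory roots and the helper name are hypothetical.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class ArchiveSketch {
        // Hypothetical helper: mirror the store file's path relative to the data root
        // under the archive root, create the parent directories, then move the file.
        static Path archiveStoreFile(Path dataRoot, Path archiveRoot, Path storeFile) throws IOException {
            Path relative = dataRoot.relativize(storeFile);   // e.g. default/Table/<region>/info/<hfile>
            Path target = archiveRoot.resolve(relative);      // same layout under the archive root
            Files.createDirectories(target.getParent());
            return Files.move(storeFile, target);
        }

        public static void main(String[] args) throws IOException {
            Path dataRoot = Files.createTempDirectory("data");
            Path archiveRoot = Files.createTempDirectory("archive");
            Path hfile = dataRoot.resolve("default/TestTable/region1/info/abc123");
            Files.createDirectories(hfile.getParent());
            Files.createFile(hfile);
            System.out.println("archived to " + archiveStoreFile(dataRoot, archiveRoot, hfile));
        }
    }
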
2024-11-22T13:38:39,463 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,463 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,463 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,463 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,464 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741834_1010 (size=8107) 2024-11-22T13:38:39,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741834_1010 (size=8107) 2024-11-22T13:38:39,472 DEBUG [RS:0;e025332d312f:41207 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/oldWALs 2024-11-22T13:38:39,472 INFO [RS:0;e025332d312f:41207 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C41207%2C1732282681164.meta:.meta(num 1732282682184) 2024-11-22T13:38:39,473 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,473 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,473 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,473 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,473 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741882_1058 (size=780) 2024-11-22T13:38:39,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741882_1058 (size=780) 2024-11-22T13:38:39,480 DEBUG [RS:0;e025332d312f:41207 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/oldWALs 2024-11-22T13:38:39,480 INFO [RS:0;e025332d312f:41207 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C41207%2C1732282681164:(num 1732282719153) 2024-11-22T13:38:39,480 DEBUG [RS:0;e025332d312f:41207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:39,480 INFO [RS:0;e025332d312f:41207 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:38:39,480 INFO [RS:0;e025332d312f:41207 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:38:39,480 INFO [RS:0;e025332d312f:41207 {}] hbase.ChoreService(370): Chore service for: regionserver/e025332d312f:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T13:38:39,480 INFO [RS:0;e025332d312f:41207 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:38:39,480 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T13:38:39,481 INFO [RS:0;e025332d312f:41207 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41207 2024-11-22T13:38:39,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e025332d312f,41207,1732282681164 2024-11-22T13:38:39,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:38:39,491 INFO [RS:0;e025332d312f:41207 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:38:39,502 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e025332d312f,41207,1732282681164] 2024-11-22T13:38:39,512 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e025332d312f,41207,1732282681164 already deleted, retry=false 2024-11-22T13:38:39,512 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e025332d312f,41207,1732282681164 expired; onlineServers=0 2024-11-22T13:38:39,512 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e025332d312f,35245,1732282680999' ***** 2024-11-22T13:38:39,512 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T13:38:39,512 INFO [M:0;e025332d312f:35245 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:38:39,513 INFO [M:0;e025332d312f:35245 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:38:39,513 DEBUG [M:0;e025332d312f:35245 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T13:38:39,513 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
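Editor's note, illustrative only: the ZooKeeper events above (NodeDeleted under /hbase/rs, then the RegionServerTracker expiring the server) reflect the ephemeral-node liveness pattern: a region server registers an ephemeral znode and the master watches the parent, so a closed session surfaces as a child change. The sketch below shows that pattern with the stock ZooKeeper Java client; it assumes the client library is on the classpath, an ensemble is reachable at 127.0.0.1:2181, and a /demo/rs parent znode already exists. It is not HBase's RegionServerTracker.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralLivenessSketch {
        public static void main(String[] args) throws Exception {
            // "Region server" side: register an ephemeral znode under the parent.
            // It is removed automatically when this session closes.
            ZooKeeper rs = new ZooKeeper("127.0.0.1:2181", 30_000, e -> { });
            rs.create("/demo/rs/server-1", new byte[0],
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

            // "Master" side: set a one-shot child watch on the parent znode, so the
            // ephemeral node's disappearance arrives as a NodeChildrenChanged event.
            ZooKeeper master = new ZooKeeper("127.0.0.1:2181", 30_000, e -> { });
            master.getChildren("/demo/rs", event -> {
                if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
                    System.out.println("children changed under " + event.getPath());
                }
            });

            Thread.sleep(500);
            rs.close();          // session ends, ephemeral node is deleted, watch fires
            Thread.sleep(500);
            master.close();
        }
    }
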
2024-11-22T13:38:39,513 DEBUG [M:0;e025332d312f:35245 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T13:38:39,513 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282681504 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282681504,5,FailOnTimeoutGroup] 2024-11-22T13:38:39,513 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282681504 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282681504,5,FailOnTimeoutGroup] 2024-11-22T13:38:39,513 INFO [M:0;e025332d312f:35245 {}] hbase.ChoreService(370): Chore service for: master/e025332d312f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T13:38:39,513 INFO [M:0;e025332d312f:35245 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:38:39,513 DEBUG [M:0;e025332d312f:35245 {}] master.HMaster(1795): Stopping service threads 2024-11-22T13:38:39,513 INFO [M:0;e025332d312f:35245 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T13:38:39,513 INFO [M:0;e025332d312f:35245 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:38:39,513 INFO [M:0;e025332d312f:35245 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T13:38:39,514 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T13:38:39,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T13:38:39,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:39,523 DEBUG [M:0;e025332d312f:35245 {}] zookeeper.ZKUtil(347): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T13:38:39,523 WARN [M:0;e025332d312f:35245 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T13:38:39,524 INFO [M:0;e025332d312f:35245 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/.lastflushedseqids 2024-11-22T13:38:39,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741883_1059 (size=228) 2024-11-22T13:38:39,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741883_1059 (size=228) 2024-11-22T13:38:39,533 INFO [M:0;e025332d312f:35245 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T13:38:39,533 INFO [M:0;e025332d312f:35245 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T13:38:39,533 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:38:39,533 INFO [M:0;e025332d312f:35245 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:39,533 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:39,533 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:38:39,533 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:39,533 INFO [M:0;e025332d312f:35245 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.44 KB heapSize=63.39 KB 2024-11-22T13:38:39,548 DEBUG [M:0;e025332d312f:35245 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/08a7e604b0654a9391cd15680f248f05 is 82, key is hbase:meta,,1/info:regioninfo/1732282682209/Put/seqid=0 2024-11-22T13:38:39,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741884_1060 (size=5672) 2024-11-22T13:38:39,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741884_1060 (size=5672) 2024-11-22T13:38:39,553 INFO [M:0;e025332d312f:35245 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/08a7e604b0654a9391cd15680f248f05 2024-11-22T13:38:39,569 DEBUG [M:0;e025332d312f:35245 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b10800bae194e00804ca76d167aeee1 is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732282682673/Put/seqid=0 2024-11-22T13:38:39,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741885_1061 (size=7091) 2024-11-22T13:38:39,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741885_1061 (size=7091) 2024-11-22T13:38:39,574 INFO [M:0;e025332d312f:35245 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.84 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b10800bae194e00804ca76d167aeee1 2024-11-22T13:38:39,578 INFO [M:0;e025332d312f:35245 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2b10800bae194e00804ca76d167aeee1 2024-11-22T13:38:39,591 DEBUG [M:0;e025332d312f:35245 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d808004102d04ade9494c3f7ad11e245 is 69, key is e025332d312f,41207,1732282681164/rs:state/1732282681633/Put/seqid=0 2024-11-22T13:38:39,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741886_1062 (size=5156) 2024-11-22T13:38:39,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741886_1062 (size=5156) 2024-11-22T13:38:39,596 INFO [M:0;e025332d312f:35245 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d808004102d04ade9494c3f7ad11e245 2024-11-22T13:38:39,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:38:39,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41207-0x10162c402410001, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:38:39,602 INFO [RS:0;e025332d312f:41207 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:38:39,602 INFO [RS:0;e025332d312f:41207 {}] regionserver.HRegionServer(1031): Exiting; stopping=e025332d312f,41207,1732282681164; zookeeper connection closed. 2024-11-22T13:38:39,602 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7767cba9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7767cba9 2024-11-22T13:38:39,603 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T13:38:39,620 DEBUG [M:0;e025332d312f:35245 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/750e27473ee440cea7abfaaf2ef26f68 is 52, key is load_balancer_on/state:d/1732282682295/Put/seqid=0 2024-11-22T13:38:39,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741887_1063 (size=5056) 2024-11-22T13:38:39,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741887_1063 (size=5056) 2024-11-22T13:38:39,625 INFO [M:0;e025332d312f:35245 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/750e27473ee440cea7abfaaf2ef26f68 2024-11-22T13:38:39,630 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/08a7e604b0654a9391cd15680f248f05 as 
hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/08a7e604b0654a9391cd15680f248f05 2024-11-22T13:38:39,635 INFO [M:0;e025332d312f:35245 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/08a7e604b0654a9391cd15680f248f05, entries=8, sequenceid=125, filesize=5.5 K 2024-11-22T13:38:39,636 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/2b10800bae194e00804ca76d167aeee1 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2b10800bae194e00804ca76d167aeee1 2024-11-22T13:38:39,642 INFO [M:0;e025332d312f:35245 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 2b10800bae194e00804ca76d167aeee1 2024-11-22T13:38:39,642 INFO [M:0;e025332d312f:35245 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/2b10800bae194e00804ca76d167aeee1, entries=13, sequenceid=125, filesize=6.9 K 2024-11-22T13:38:39,643 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d808004102d04ade9494c3f7ad11e245 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d808004102d04ade9494c3f7ad11e245 2024-11-22T13:38:39,648 INFO [M:0;e025332d312f:35245 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d808004102d04ade9494c3f7ad11e245, entries=1, sequenceid=125, filesize=5.0 K 2024-11-22T13:38:39,649 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/750e27473ee440cea7abfaaf2ef26f68 as hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/750e27473ee440cea7abfaaf2ef26f68 2024-11-22T13:38:39,654 INFO [M:0;e025332d312f:35245 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43267/user/jenkins/test-data/95e92cc1-35df-35c1-bd72-3393ba18144b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/750e27473ee440cea7abfaaf2ef26f68, entries=1, sequenceid=125, filesize=4.9 K 2024-11-22T13:38:39,655 INFO [M:0;e025332d312f:35245 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=125, compaction requested=false 2024-11-22T13:38:39,655 INFO [regionserver/e025332d312f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:38:39,656 INFO 
[M:0;e025332d312f:35245 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:39,656 DEBUG [M:0;e025332d312f:35245 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282719533Disabling compacts and flushes for region at 1732282719533Disabling writes for close at 1732282719533Obtaining lock to block concurrent updates at 1732282719533Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732282719533Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52675, getHeapSize=64848, getOffHeapSize=0, getCellsCount=148 at 1732282719534 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732282719534Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732282719534Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732282719548 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732282719548Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732282719556 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732282719569 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732282719569Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732282719578 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732282719591 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732282719591Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732282719601 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732282719620 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732282719620Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2088bf5d: reopening flushed file at 1732282719629 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@333de877: reopening flushed file at 1732282719636 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1eb6c04: reopening flushed file at 1732282719642 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6aa02ee2: reopening flushed file at 1732282719648 (+6 ms)Finished flush of dataSize ~51.44 KB/52675, heapSize ~63.33 KB/64848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 122ms, sequenceid=125, compaction requested=false at 1732282719655 (+7 ms)Writing region close event to WAL at 1732282719656 (+1 ms)Closed at 1732282719656 2024-11-22T13:38:39,656 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,657 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,657 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,657 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,657 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:39,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35875 is added to blk_1073741830_1006 (size=61344) 2024-11-22T13:38:39,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44415 is added to blk_1073741830_1006 (size=61344) 2024-11-22T13:38:39,659 INFO [M:0;e025332d312f:35245 {}] flush.MasterFlushTableProcedureManager(90): 
stop: server shutting down. 2024-11-22T13:38:39,659 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T13:38:39,659 INFO [M:0;e025332d312f:35245 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35245 2024-11-22T13:38:39,659 INFO [M:0;e025332d312f:35245 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:38:39,770 INFO [M:0;e025332d312f:35245 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:38:39,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:38:39,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35245-0x10162c402410000, quorum=127.0.0.1:64347, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:38:39,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62d57d83{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:38:39,773 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7cbc9241{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:38:39,773 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:38:39,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57390027{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:38:39,773 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6ff82d67{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.log.dir/,STOPPED} 2024-11-22T13:38:39,774 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
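Editor's note, illustrative only: the datanode shutdown lines here ("Command processor encountered interrupt and exit", and the later thread dump parked in LinkedBlockingQueue.take) show the usual interrupt-driven shutdown of a queue-draining worker thread. The sketch below is a generic plain-Java version of that pattern, not Hadoop's BPServiceActor.

    import java.util.concurrent.LinkedBlockingQueue;

    public class CommandProcessorSketch {
        public static void main(String[] args) throws InterruptedException {
            LinkedBlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
            Thread processor = new Thread(() -> {
                try {
                    while (!Thread.currentThread().isInterrupted()) {
                        queue.take().run();      // blocks until a command arrives
                    }
                } catch (InterruptedException e) {
                    // Mirrors the "encountered interrupt and exit" behavior in the log:
                    // an interrupt while blocked in take() is the signal to stop.
                    System.out.println("command processor interrupted, exiting");
                }
            }, "command-processor");
            processor.start();
            queue.put(() -> System.out.println("ran one command"));
            Thread.sleep(100);                    // let the queued command run
            processor.interrupt();                // request shutdown
            processor.join();
        }
    }
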
2024-11-22T13:38:39,774 WARN [BP-23462902-172.17.0.2-1732282678811 heartbeating to localhost/127.0.0.1:43267 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:38:39,774 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:38:39,774 WARN [BP-23462902-172.17.0.2-1732282678811 heartbeating to localhost/127.0.0.1:43267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-23462902-172.17.0.2-1732282678811 (Datanode Uuid f68979ea-5d8e-4a31-8f4b-04833ce0c856) service to localhost/127.0.0.1:43267 2024-11-22T13:38:39,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/data/data3/current/BP-23462902-172.17.0.2-1732282678811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:38:39,775 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/data/data4/current/BP-23462902-172.17.0.2-1732282678811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:38:39,775 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:38:39,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e483a61{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:38:39,778 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ab86f9f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:38:39,778 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:38:39,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47c8059{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:38:39,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7181dda1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.log.dir/,STOPPED} 2024-11-22T13:38:39,778 WARN [BP-23462902-172.17.0.2-1732282678811 heartbeating to localhost/127.0.0.1:43267 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-23462902-172.17.0.2-1732282678811 (Datanode Uuid 58b21bbf-e607-4a00-b1f2-21128c369588) service to localhost/127.0.0.1:43267 2024-11-22T13:38:39,779 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/data/data1/current/BP-23462902-172.17.0.2-1732282678811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:38:39,779 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/cluster_06cbc5f1-b91a-f92d-86d9-2c2994f36539/data/data2/current/BP-23462902-172.17.0.2-1732282678811 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:38:39,780 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:38:39,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7986f193{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:38:39,786 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47cbf00e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:38:39,786 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:38:39,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d31ee43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:38:39,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41a74ab6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.log.dir/,STOPPED} 2024-11-22T13:38:39,794 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T13:38:39,825 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T13:38:39,835 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=233 (was 208) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43267 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43267 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43267 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:43267 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43267 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43267 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=521 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=50 (was 53), ProcessCount=11 (was 11), AvailableMemoryMB=2316 (was 2361) 2024-11-22T13:38:39,842 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=233, OpenFileDescriptor=521, MaxFileDescriptor=1048576, SystemLoadAverage=50, ProcessCount=11, AvailableMemoryMB=2316 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.log.dir so I do NOT create it in target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/fe6dbdc9-b81b-8742-bc34-223421373e1b/hadoop.tmp.dir so I do NOT create it in target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445, deleteOnExit=true 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/test.cache.data in system properties and HBase conf 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/hadoop.log.dir in system properties and HBase conf 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T13:38:39,843 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T13:38:39,844 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:38:39,844 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T13:38:39,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/nfs.dump.dir in system properties and HBase conf 2024-11-22T13:38:39,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/java.io.tmpdir in system properties and HBase conf 2024-11-22T13:38:39,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T13:38:39,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T13:38:39,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T13:38:39,858 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:38:39,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:39,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:40,195 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:38:40,198 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:38:40,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:38:40,199 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:38:40,199 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:38:40,200 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:38:40,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2601a9a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:38:40,200 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e4376a4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:38:40,291 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@cbcac8c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/java.io.tmpdir/jetty-localhost-42581-hadoop-hdfs-3_4_1-tests_jar-_-any-8500182118624835994/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:38:40,291 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4cb35637{HTTP/1.1, (http/1.1)}{localhost:42581} 2024-11-22T13:38:40,291 INFO [Time-limited test {}] server.Server(415): Started @289471ms 2024-11-22T13:38:40,302 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T13:38:40,551 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:38:40,554 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:38:40,554 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:38:40,554 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:38:40,554 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T13:38:40,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59532081{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:38:40,555 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e0b3b7c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:38:40,646 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11ec225b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/java.io.tmpdir/jetty-localhost-38621-hadoop-hdfs-3_4_1-tests_jar-_-any-18217594270448124912/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:38:40,647 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6df20715{HTTP/1.1, (http/1.1)}{localhost:38621} 2024-11-22T13:38:40,647 INFO [Time-limited test {}] server.Server(415): Started @289826ms 2024-11-22T13:38:40,648 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:38:40,672 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T13:38:40,674 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T13:38:40,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T13:38:40,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T13:38:40,675 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T13:38:40,675 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c78c12c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/hadoop.log.dir/,AVAILABLE} 2024-11-22T13:38:40,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e54a8c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T13:38:40,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@393b35ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/java.io.tmpdir/jetty-localhost-39525-hadoop-hdfs-3_4_1-tests_jar-_-any-2438972775747573728/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:38:40,768 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51c0efa3{HTTP/1.1, (http/1.1)}{localhost:39525} 2024-11-22T13:38:40,768 INFO [Time-limited test {}] server.Server(415): Started @289947ms 2024-11-22T13:38:40,769 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T13:38:40,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:40,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:41,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:41,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:41,938 WARN [Thread-2510 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/data/data1/current/BP-277395602-172.17.0.2-1732282719861/current, will proceed with Du for space computation calculation, 2024-11-22T13:38:41,939 WARN [Thread-2511 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/data/data2/current/BP-277395602-172.17.0.2-1732282719861/current, will proceed with Du for space computation calculation, 2024-11-22T13:38:41,957 WARN [Thread-2474 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:38:41,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x97fad32dd84b0420 with lease ID 0x9d9a520d4f663ea1: Processing first storage report for DS-bdb5022e-4b49-4c0c-82c8-681d5e479a96 from datanode DatanodeRegistration(127.0.0.1:45201, datanodeUuid=3722ec9a-a445-49f8-a5e2-364857838e95, infoPort=44053, infoSecurePort=0, ipcPort=42397, storageInfo=lv=-57;cid=testClusterID;nsid=1815900113;c=1732282719861) 2024-11-22T13:38:41,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x97fad32dd84b0420 with lease ID 0x9d9a520d4f663ea1: from storage DS-bdb5022e-4b49-4c0c-82c8-681d5e479a96 node DatanodeRegistration(127.0.0.1:45201, datanodeUuid=3722ec9a-a445-49f8-a5e2-364857838e95, infoPort=44053, infoSecurePort=0, ipcPort=42397, storageInfo=lv=-57;cid=testClusterID;nsid=1815900113;c=1732282719861), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T13:38:41,959 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x97fad32dd84b0420 with lease ID 0x9d9a520d4f663ea1: Processing first storage report for DS-596eb8dc-ab67-4bd4-88be-c533a3122ec6 from datanode DatanodeRegistration(127.0.0.1:45201, datanodeUuid=3722ec9a-a445-49f8-a5e2-364857838e95, infoPort=44053, infoSecurePort=0, ipcPort=42397, storageInfo=lv=-57;cid=testClusterID;nsid=1815900113;c=1732282719861) 2024-11-22T13:38:41,959 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x97fad32dd84b0420 with lease ID 0x9d9a520d4f663ea1: from storage DS-596eb8dc-ab67-4bd4-88be-c533a3122ec6 node DatanodeRegistration(127.0.0.1:45201, datanodeUuid=3722ec9a-a445-49f8-a5e2-364857838e95, infoPort=44053, infoSecurePort=0, ipcPort=42397, storageInfo=lv=-57;cid=testClusterID;nsid=1815900113;c=1732282719861), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:38:42,096 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/data/data3/current/BP-277395602-172.17.0.2-1732282719861/current, will proceed with Du for space computation calculation, 2024-11-22T13:38:42,096 WARN [Thread-2522 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/data/data4/current/BP-277395602-172.17.0.2-1732282719861/current, will proceed with Du for space computation calculation, 2024-11-22T13:38:42,117 WARN [Thread-2497 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T13:38:42,119 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb24d34b36af82385 with lease ID 0x9d9a520d4f663ea2: Processing first storage report for DS-3d4e4d5e-8f5b-43c5-b4ac-a68020dfd4e6 from datanode DatanodeRegistration(127.0.0.1:40549, datanodeUuid=dd626bab-b615-4e57-ba0b-a9baec66e793, infoPort=37699, infoSecurePort=0, ipcPort=45493, storageInfo=lv=-57;cid=testClusterID;nsid=1815900113;c=1732282719861) 2024-11-22T13:38:42,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb24d34b36af82385 with lease ID 0x9d9a520d4f663ea2: from storage DS-3d4e4d5e-8f5b-43c5-b4ac-a68020dfd4e6 node DatanodeRegistration(127.0.0.1:40549, datanodeUuid=dd626bab-b615-4e57-ba0b-a9baec66e793, infoPort=37699, infoSecurePort=0, ipcPort=45493, storageInfo=lv=-57;cid=testClusterID;nsid=1815900113;c=1732282719861), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:38:42,119 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb24d34b36af82385 with lease ID 0x9d9a520d4f663ea2: Processing first storage report for DS-f9026c18-1cf0-4074-a9fc-27ba8d3ce2c4 from datanode DatanodeRegistration(127.0.0.1:40549, datanodeUuid=dd626bab-b615-4e57-ba0b-a9baec66e793, infoPort=37699, infoSecurePort=0, ipcPort=45493, storageInfo=lv=-57;cid=testClusterID;nsid=1815900113;c=1732282719861) 2024-11-22T13:38:42,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb24d34b36af82385 with lease ID 0x9d9a520d4f663ea2: from storage DS-f9026c18-1cf0-4074-a9fc-27ba8d3ce2c4 node DatanodeRegistration(127.0.0.1:40549, datanodeUuid=dd626bab-b615-4e57-ba0b-a9baec66e793, infoPort=37699, infoSecurePort=0, ipcPort=45493, storageInfo=lv=-57;cid=testClusterID;nsid=1815900113;c=1732282719861), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T13:38:42,204 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3 2024-11-22T13:38:42,210 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/zookeeper_0, clientPort=50360, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T13:38:42,211 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50360 2024-11-22T13:38:42,212 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:42,213 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:42,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:38:42,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741825_1001 (size=7) 2024-11-22T13:38:42,224 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7 with version=8 2024-11-22T13:38:42,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37741/user/jenkins/test-data/a383099a-5c50-7485-173d-3f9fa0b8e56f/hbase-staging 2024-11-22T13:38:42,226 INFO [Time-limited test {}] client.ConnectionUtils(128): master/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:38:42,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:42,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:42,226 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:38:42,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:42,226 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:38:42,226 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T13:38:42,226 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:38:42,227 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41717 2024-11-22T13:38:42,228 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41717 connecting to ZooKeeper ensemble=127.0.0.1:50360 2024-11-22T13:38:42,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:417170x0, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:38:42,284 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41717-0x10162c4a34b0000 connected 2024-11-22T13:38:42,365 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:42,366 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting 
call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:42,367 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:38:42,368 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7, hbase.cluster.distributed=false 2024-11-22T13:38:42,369 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:38:42,369 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41717 2024-11-22T13:38:42,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41717 2024-11-22T13:38:42,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41717 2024-11-22T13:38:42,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41717 2024-11-22T13:38:42,370 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41717 2024-11-22T13:38:42,386 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/e025332d312f:0 server-side Connection retries=45 2024-11-22T13:38:42,386 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:42,386 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:42,386 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T13:38:42,386 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T13:38:42,386 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T13:38:42,386 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T13:38:42,386 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T13:38:42,387 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45083 2024-11-22T13:38:42,388 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45083 connecting to ZooKeeper ensemble=127.0.0.1:50360 2024-11-22T13:38:42,388 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:42,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:42,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:450830x0, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T13:38:42,397 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:38:42,397 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45083-0x10162c4a34b0001 connected 2024-11-22T13:38:42,397 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T13:38:42,398 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T13:38:42,398 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T13:38:42,399 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T13:38:42,399 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45083 2024-11-22T13:38:42,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45083 2024-11-22T13:38:42,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45083 2024-11-22T13:38:42,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45083 2024-11-22T13:38:42,404 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45083 2024-11-22T13:38:42,415 DEBUG [M:0;e025332d312f:41717 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e025332d312f:41717 2024-11-22T13:38:42,415 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/e025332d312f,41717,1732282722226 2024-11-22T13:38:42,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:38:42,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:38:42,428 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e025332d312f,41717,1732282722226 2024-11-22T13:38:42,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T13:38:42,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,439 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T13:38:42,439 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e025332d312f,41717,1732282722226 from backup master directory 2024-11-22T13:38:42,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e025332d312f,41717,1732282722226 2024-11-22T13:38:42,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:38:42,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T13:38:42,449 WARN [master/e025332d312f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
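The ZKUtil/ZKWatcher records above ("Set watcher on znode that does not yet exist, /hbase/master") rest on ZooKeeper's ability to register a watch through an exists() call even when the znode is absent. Below is a minimal sketch of that pattern using only the stock org.apache.zookeeper client; the quorum address 127.0.0.1:50360 is the one in the log, while the class name, session timeout and printed messages are illustrative assumptions and this is not HBase's ZKUtil code.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Quorum mirrors the mini ZK cluster started above; any running ZooKeeper would do.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:50360", 30000, event -> { });
    Watcher masterWatcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());
    // exists() registers the watch whether or not the znode is there yet,
    // so a NodeCreated event is delivered later when /hbase/master appears.
    if (zk.exists("/hbase/master", masterWatcher) == null) {
      System.out.println("/hbase/master not created yet; watch registered anyway");
    }
    zk.close();
  }
}

When the active master later creates /hbase/master, the registered watcher receives a NodeCreated event, which matches the "type=NodeCreated ... path=/hbase/master" event logged for the regionserver:45083 watcher above.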
2024-11-22T13:38:42,449 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e025332d312f,41717,1732282722226 2024-11-22T13:38:42,452 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/hbase.id] with ID: 0c2487c2-82a8-4a13-92f3-70a0e1d9add3 2024-11-22T13:38:42,452 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/.tmp/hbase.id 2024-11-22T13:38:42,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:38:42,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741826_1002 (size=42) 2024-11-22T13:38:42,458 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/.tmp/hbase.id]:[hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/hbase.id] 2024-11-22T13:38:42,467 INFO [master/e025332d312f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:42,467 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T13:38:42,468 INFO [master/e025332d312f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
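The FSUtils records above create the cluster ID by writing hbase.id to a .tmp location first and then moving it to its target path, so readers never observe a half-written file. The sketch below shows that write-then-rename pattern with the plain Hadoop FileSystem API; the NameNode URI and the ID value are taken from the log, while the /user/jenkins/test-data/demo paths and the class name are made-up placeholders, and this is not the FSUtils implementation itself.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:40981"), new Configuration());
    Path tmp = new Path("/user/jenkins/test-data/demo/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/demo/hbase.id");
    // Write the ID (the value logged above) into the temporary file first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("0c2487c2-82a8-4a13-92f3-70a0e1d9add3".getBytes(StandardCharsets.UTF_8));
    }
    // Then publish it by renaming the finished file into place.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("could not move " + tmp + " to " + dst);
    }
    fs.close();
  }
}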
2024-11-22T13:38:42,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:38:42,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741827_1003 (size=196) 2024-11-22T13:38:42,487 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T13:38:42,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T13:38:42,489 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:38:42,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:38:42,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741828_1004 (size=1189) 2024-11-22T13:38:42,498 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store 2024-11-22T13:38:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:38:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741829_1005 (size=34) 2024-11-22T13:38:42,506 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:42,506 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:38:42,506 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:42,506 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:42,506 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:38:42,506 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:42,506 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T13:38:42,506 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282722506Disabling compacts and flushes for region at 1732282722506Disabling writes for close at 1732282722506Writing region close event to WAL at 1732282722506Closed at 1732282722506 2024-11-22T13:38:42,506 WARN [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/.initializing 2024-11-22T13:38:42,507 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/WALs/e025332d312f,41717,1732282722226 2024-11-22T13:38:42,509 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C41717%2C1732282722226, suffix=, logDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/WALs/e025332d312f,41717,1732282722226, archiveDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/oldWALs, maxLogs=10 2024-11-22T13:38:42,509 INFO [master/e025332d312f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C41717%2C1732282722226.1732282722509 2024-11-22T13:38:42,513 INFO [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/WALs/e025332d312f,41717,1732282722226/e025332d312f%2C41717%2C1732282722226.1732282722509 2024-11-22T13:38:42,516 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44053:44053),(127.0.0.1/127.0.0.1:37699:37699)] 2024-11-22T13:38:42,520 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:38:42,520 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:42,520 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,520 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,522 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T13:38:42,523 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:42,523 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T13:38:42,524 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:42,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T13:38:42,526 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:42,526 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,527 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T13:38:42,527 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,528 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T13:38:42,528 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,528 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,529 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,530 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,530 DEBUG [master/e025332d312f:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,530 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T13:38:42,531 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T13:38:42,533 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:38:42,533 INFO [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787193, jitterRate=9.683668613433838E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T13:38:42,534 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732282722521Initializing all the Stores at 1732282722521Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282722521Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282722522 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282722522Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282722522Cleaning up temporary data from old regions at 1732282722530 (+8 ms)Region opened successfully at 1732282722534 (+4 ms) 2024-11-22T13:38:42,534 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T13:38:42,536 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7edee7e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:38:42,537 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T13:38:42,537 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T13:38:42,537 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T13:38:42,537 INFO [master/e025332d312f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T13:38:42,538 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T13:38:42,538 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T13:38:42,538 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T13:38:42,540 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T13:38:42,540 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T13:38:42,543 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T13:38:42,544 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T13:38:42,544 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T13:38:42,554 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T13:38:42,554 INFO [master/e025332d312f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T13:38:42,555 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T13:38:42,564 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T13:38:42,566 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T13:38:42,575 DEBUG 
[master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T13:38:42,578 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T13:38:42,586 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T13:38:42,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:38:42,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T13:38:42,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,598 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=e025332d312f,41717,1732282722226, sessionid=0x10162c4a34b0000, setting cluster-up flag (Was=false) 2024-11-22T13:38:42,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,618 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,649 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T13:38:42,650 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,41717,1732282722226 2024-11-22T13:38:42,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,670 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:42,701 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T13:38:42,703 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e025332d312f,41717,1732282722226 2024-11-22T13:38:42,704 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T13:38:42,705 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T13:38:42,705 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T13:38:42,706 INFO [master/e025332d312f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T13:38:42,706 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e025332d312f,41717,1732282722226 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T13:38:42,706 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(746): ClusterId : 0c2487c2-82a8-4a13-92f3-70a0e1d9add3 2024-11-22T13:38:42,706 DEBUG [RS:0;e025332d312f:45083 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T13:38:42,713 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:38:42,713 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:38:42,713 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:38:42,713 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e025332d312f:0, corePoolSize=5, maxPoolSize=5 2024-11-22T13:38:42,713 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e025332d312f:0, corePoolSize=10, maxPoolSize=10 2024-11-22T13:38:42,713 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,713 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:38:42,713 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,713 DEBUG [RS:0;e025332d312f:45083 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T13:38:42,713 DEBUG [RS:0;e025332d312f:45083 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T13:38:42,714 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732282752714 2024-11-22T13:38:42,715 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T13:38:42,715 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T13:38:42,715 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T13:38:42,715 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T13:38:42,715 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T13:38:42,715 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T13:38:42,715 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-22T13:38:42,715 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:38:42,715 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T13:38:42,716 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T13:38:42,716 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T13:38:42,716 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T13:38:42,716 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T13:38:42,716 INFO [master/e025332d312f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T13:38:42,717 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282722716,5,FailOnTimeoutGroup] 2024-11-22T13:38:42,717 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,717 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282722717,5,FailOnTimeoutGroup] 2024-11-22T13:38:42,717 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,717 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T13:38:42,717 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,717 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-22T13:38:42,717 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T13:38:42,723 DEBUG [RS:0;e025332d312f:45083 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T13:38:42,724 DEBUG [RS:0;e025332d312f:45083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@606afcee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e025332d312f/172.17.0.2:0 2024-11-22T13:38:42,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:38:42,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741831_1007 (size=1321) 2024-11-22T13:38:42,726 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T13:38:42,726 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING 
=> 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7 2024-11-22T13:38:42,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:38:42,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741832_1008 (size=32) 2024-11-22T13:38:42,733 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:42,734 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:38:42,735 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:38:42,735 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,735 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:42,735 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:38:42,736 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:38:42,736 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,737 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:42,737 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:38:42,738 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:38:42,738 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:42,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:38:42,739 DEBUG [RS:0;e025332d312f:45083 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e025332d312f:45083 2024-11-22T13:38:42,739 INFO [RS:0;e025332d312f:45083 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T13:38:42,739 INFO [RS:0;e025332d312f:45083 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T13:38:42,739 DEBUG [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T13:38:42,739 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:38:42,739 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:42,739 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:42,740 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(2659): reportForDuty to master=e025332d312f,41717,1732282722226 with port=45083, startcode=1732282722386 2024-11-22T13:38:42,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:38:42,740 DEBUG [RS:0;e025332d312f:45083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T13:38:42,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740 2024-11-22T13:38:42,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740 2024-11-22T13:38:42,741 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:38:42,741 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:38:42,741 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39679, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T13:38:42,742 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T13:38:42,742 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41717 {}] master.ServerManager(363): Checking decommissioned status of RegionServer e025332d312f,45083,1732282722386 2024-11-22T13:38:42,742 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41717 {}] master.ServerManager(517): Registering regionserver=e025332d312f,45083,1732282722386 2024-11-22T13:38:42,743 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:38:42,743 DEBUG [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7 2024-11-22T13:38:42,743 DEBUG [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40981 2024-11-22T13:38:42,743 DEBUG [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T13:38:42,744 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T13:38:42,744 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=756033, jitterRate=-0.03865484893321991}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:38:42,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732282722733Initializing all the Stores at 1732282722733Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282722733Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282722734 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282722734Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282722734Cleaning up temporary data from old regions at 1732282722741 (+7 ms)Region opened successfully at 1732282722745 (+4 ms) 2024-11-22T13:38:42,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
2024-11-22T13:38:42,745 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:38:42,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:38:42,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:38:42,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:38:42,745 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:38:42,745 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282722745Disabling compacts and flushes for region at 1732282722745Disabling writes for close at 1732282722745Writing region close event to WAL at 1732282722745Closed at 1732282722745 2024-11-22T13:38:42,746 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:38:42,746 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T13:38:42,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T13:38:42,747 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:38:42,748 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T13:38:42,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:38:42,754 DEBUG [RS:0;e025332d312f:45083 {}] zookeeper.ZKUtil(111): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e025332d312f,45083,1732282722386 2024-11-22T13:38:42,755 WARN [RS:0;e025332d312f:45083 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T13:38:42,755 INFO [RS:0;e025332d312f:45083 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:38:42,755 DEBUG [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/e025332d312f,45083,1732282722386 2024-11-22T13:38:42,755 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e025332d312f,45083,1732282722386] 2024-11-22T13:38:42,757 INFO [RS:0;e025332d312f:45083 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T13:38:42,758 INFO [RS:0;e025332d312f:45083 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T13:38:42,759 INFO [RS:0;e025332d312f:45083 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T13:38:42,759 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,759 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T13:38:42,759 INFO [RS:0;e025332d312f:45083 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T13:38:42,760 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e025332d312f:0, corePoolSize=2, maxPoolSize=2 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e025332d312f:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e025332d312f:0, corePoolSize=1, maxPoolSize=1 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:38:42,760 DEBUG [RS:0;e025332d312f:45083 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e025332d312f:0, corePoolSize=3, maxPoolSize=3 2024-11-22T13:38:42,761 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,762 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,762 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,762 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,762 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,762 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45083,1732282722386-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:38:42,775 INFO [RS:0;e025332d312f:45083 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T13:38:42,775 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,45083,1732282722386-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,776 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:42,776 INFO [RS:0;e025332d312f:45083 {}] regionserver.Replication(171): e025332d312f,45083,1732282722386 started 2024-11-22T13:38:42,788 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T13:38:42,788 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1482): Serving as e025332d312f,45083,1732282722386, RpcServer on e025332d312f/172.17.0.2:45083, sessionid=0x10162c4a34b0001 2024-11-22T13:38:42,788 DEBUG [RS:0;e025332d312f:45083 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T13:38:42,788 DEBUG [RS:0;e025332d312f:45083 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e025332d312f,45083,1732282722386 2024-11-22T13:38:42,788 DEBUG [RS:0;e025332d312f:45083 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,45083,1732282722386' 2024-11-22T13:38:42,788 DEBUG [RS:0;e025332d312f:45083 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T13:38:42,789 DEBUG [RS:0;e025332d312f:45083 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T13:38:42,789 DEBUG [RS:0;e025332d312f:45083 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T13:38:42,789 DEBUG [RS:0;e025332d312f:45083 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T13:38:42,789 DEBUG [RS:0;e025332d312f:45083 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e025332d312f,45083,1732282722386 2024-11-22T13:38:42,789 DEBUG [RS:0;e025332d312f:45083 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e025332d312f,45083,1732282722386' 2024-11-22T13:38:42,790 DEBUG [RS:0;e025332d312f:45083 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T13:38:42,790 DEBUG [RS:0;e025332d312f:45083 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T13:38:42,790 DEBUG [RS:0;e025332d312f:45083 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T13:38:42,790 INFO [RS:0;e025332d312f:45083 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T13:38:42,790 INFO [RS:0;e025332d312f:45083 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T13:38:42,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:42,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:42,894 INFO [RS:0;e025332d312f:45083 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C45083%2C1732282722386, suffix=, logDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/e025332d312f,45083,1732282722386, archiveDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/oldWALs, maxLogs=32 2024-11-22T13:38:42,896 INFO [RS:0;e025332d312f:45083 {}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45083%2C1732282722386.1732282722895 2024-11-22T13:38:42,898 WARN [e025332d312f:41717 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T13:38:42,903 INFO [RS:0;e025332d312f:45083 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/e025332d312f,45083,1732282722386/e025332d312f%2C45083%2C1732282722386.1732282722895 2024-11-22T13:38:42,908 DEBUG [RS:0;e025332d312f:45083 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44053:44053),(127.0.0.1/127.0.0.1:37699:37699)] 2024-11-22T13:38:43,149 DEBUG [e025332d312f:41717 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T13:38:43,150 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e025332d312f,45083,1732282722386 2024-11-22T13:38:43,153 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,45083,1732282722386, state=OPENING 2024-11-22T13:38:43,218 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T13:38:43,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:43,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:43,230 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T13:38:43,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:38:43,230 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:38:43,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,45083,1732282722386}] 2024-11-22T13:38:43,387 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T13:38:43,393 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52849, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T13:38:43,398 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T13:38:43,398 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:38:43,399 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e025332d312f%2C45083%2C1732282722386.meta, suffix=.meta, logDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/e025332d312f,45083,1732282722386, archiveDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/oldWALs, maxLogs=32 2024-11-22T13:38:43,399 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor e025332d312f%2C45083%2C1732282722386.meta.1732282723399.meta 2024-11-22T13:38:43,407 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/e025332d312f,45083,1732282722386/e025332d312f%2C45083%2C1732282722386.meta.1732282723399.meta 2024-11-22T13:38:43,411 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37699:37699),(127.0.0.1/127.0.0.1:44053:44053)] 2024-11-22T13:38:43,412 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T13:38:43,413 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T13:38:43,413 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T13:38:43,413 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
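The earlier WARN entries at 13:38:42,891 show RecoverLeaseFSUtils probing isFileClosed through reflection and receiving an InvocationTargetException whose cause is "Filesystem closed" (the old cluster's DFSClient had already been shut down). A hedged sketch of that reflective probe is below; it illustrates why the real failure has to be unwrapped from InvocationTargetException, and it is not the HBase utility itself (the method and class names here are made up except for isFileClosed, which HDFS does expose on DistributedFileSystem).

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class IsFileClosedProbe {
      /**
       * Returns true if the filesystem reports the file as closed, false if the
       * isFileClosed method is unavailable or the call failed (e.g. "Filesystem closed").
       */
      static boolean probeIsFileClosed(FileSystem fs, Path p) {
        try {
          // DistributedFileSystem#isFileClosed(Path) exists on HDFS; the generic FileSystem
          // class has no such method, which is why the probe goes through reflection.
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, p);
        } catch (NoSuchMethodException | IllegalAccessException e) {
          return false; // not an HDFS filesystem, or method not accessible
        } catch (InvocationTargetException e) {
          // The interesting failure is the cause, e.g. java.io.IOException: Filesystem closed.
          System.err.println("isFileClosed failed: " + e.getCause());
          return false;
        }
      }
    }

In the log the cause is benign: the WAL writer was being closed against a filesystem that the previous mini cluster had already torn down, so the lease-recovery probe simply reports the failure and moves on.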
2024-11-22T13:38:43,413 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T13:38:43,413 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T13:38:43,413 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T13:38:43,413 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T13:38:43,414 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T13:38:43,415 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T13:38:43,415 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:43,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:43,415 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T13:38:43,416 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T13:38:43,416 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:43,416 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:43,417 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T13:38:43,417 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T13:38:43,417 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:43,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T13:38:43,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T13:38:43,418 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T13:38:43,418 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T13:38:43,419 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
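The CompactionConfiguration lines above repeat the same knobs for every column family: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, minCompactSize 128 MB. The sketch below is a deliberately simplified size-ratio selection loop in the spirit of those settings; it is not the actual ExploringCompactionPolicy named in the log, which additionally scores and compares candidate sets.

    import java.util.ArrayList;
    import java.util.List;

    public class RatioCompactionSketch {
      // Pick a run of files where each file is no larger than ratio * (sum of the others),
      // honoring the min/max file counts from the configuration above.
      static List<Long> select(List<Long> sizes, int minFiles, int maxFiles,
                               double ratio, long minCompactSize) {
        for (int start = 0; start + minFiles <= sizes.size(); start++) {
          int end = Math.min(sizes.size(), start + maxFiles);
          List<Long> candidate = new ArrayList<>(sizes.subList(start, end));
          long total = candidate.stream().mapToLong(Long::longValue).sum();
          boolean ok = true;
          for (long s : candidate) {
            // Files under minCompactSize are always eligible; larger ones must pass the ratio test.
            if (s > minCompactSize && s > ratio * (total - s)) {
              ok = false;
              break;
            }
          }
          if (ok) {
            return candidate;
          }
        }
        return List.of(); // nothing worth compacting
      }

      public static void main(String[] args) {
        // Sizes in bytes; with ratio 1.2 the oversized first file is skipped.
        List<Long> files = List.of(900L << 20, 40L << 20, 35L << 20, 30L << 20);
        System.out.println(select(files, 3, 10, 1.2, 128L << 20));
      }
    }

With these inputs the 900 MB file fails the ratio test, so the selection falls through to the three small files, which is the behaviour the ratio is meant to encourage: avoid rewriting a large file just to merge in a few small ones.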
2024-11-22T13:38:43,419 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T13:38:43,420 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740 2024-11-22T13:38:43,421 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740 2024-11-22T13:38:43,422 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T13:38:43,422 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T13:38:43,422 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T13:38:43,424 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T13:38:43,424 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732730, jitterRate=-0.06828628480434418}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T13:38:43,424 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T13:38:43,425 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732282723413Writing region info on filesystem at 1732282723413Initializing all the Stores at 1732282723414 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282723414Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282723414Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732282723414Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732282723414Cleaning up temporary data from old regions at 1732282723422 (+8 ms)Running coprocessor post-open hooks at 1732282723424 (+2 ms)Region opened successfully at 1732282723425 (+1 ms) 2024-11-22T13:38:43,426 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732282723387 2024-11-22T13:38:43,428 DEBUG [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T13:38:43,428 INFO [RS_OPEN_META-regionserver/e025332d312f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T13:38:43,429 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e025332d312f,45083,1732282722386 2024-11-22T13:38:43,429 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e025332d312f,45083,1732282722386, state=OPEN 2024-11-22T13:38:43,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:38:43,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T13:38:43,470 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=e025332d312f,45083,1732282722386 2024-11-22T13:38:43,470 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:38:43,470 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T13:38:43,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T13:38:43,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=e025332d312f,45083,1732282722386 in 240 msec 2024-11-22T13:38:43,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T13:38:43,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 726 msec 2024-11-22T13:38:43,475 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T13:38:43,475 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T13:38:43,476 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:38:43,476 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,45083,1732282722386, seqNum=-1] 2024-11-22T13:38:43,476 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:38:43,477 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55327, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:38:43,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 776 msec 2024-11-22T13:38:43,482 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732282723482, completionTime=-1 2024-11-22T13:38:43,482 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T13:38:43,482 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732282783484 2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732282843484 2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41717,1732282722226-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41717,1732282722226-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41717,1732282722226-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e025332d312f:41717, period=300000, unit=MILLISECONDS is enabled. 
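The assignment sequence above ends with the meta location being published under /hbase/meta-region-server and clients fetching it from the registry ("The fetched meta region location is [region=hbase:meta,,1.1588230740, ...]"). Below is a minimal sketch of watching that znode with the plain ZooKeeper client, using the quorum address from this log; in a real cluster the payload is a protobuf-encoded record, so the bytes are only counted here, not parsed, and this is not the HBase MetaTableLocator/ZKWatcher code.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MetaLocationWatchSketch {
      public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:50360";          // quorum from the log above
        String znode = "/hbase/meta-region-server"; // where the master publishes the meta location

        Watcher watcher = (WatchedEvent event) ->
            System.out.println("ZK event: " + event.getType() + " on " + event.getPath());

        ZooKeeper zk = new ZooKeeper(quorum, 30_000, watcher);
        Stat stat = new Stat();
        // Passing true re-uses the session watcher; it fires again on NodeDataChanged,
        // which is the event the ZKWatcher lines above report when the state flips to OPEN.
        byte[] data = zk.getData(znode, true, stat);
        System.out.println("meta znode version=" + stat.getVersion() + ", " + data.length + " bytes");
        zk.close();
      }
    }

This is the same publish-and-watch handshake visible in the log: the master writes the location while the region is OPENING, rewrites it when it is OPEN, and every watcher (master, region server, client cache) sees the corresponding NodeDataChanged event.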
2024-11-22T13:38:43,484 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:43,485 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T13:38:43,486 DEBUG [master/e025332d312f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T13:38:43,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.039sec 2024-11-22T13:38:43,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T13:38:43,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T13:38:43,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T13:38:43,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T13:38:43,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T13:38:43,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41717,1732282722226-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T13:38:43,488 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41717,1732282722226-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T13:38:43,491 DEBUG [master/e025332d312f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T13:38:43,491 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T13:38:43,491 INFO [master/e025332d312f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e025332d312f,41717,1732282722226-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
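Once the master reports "completed initialization", the cluster is usable from the standard client API. The small example below connects with the ZooKeeper quorum and client port seen in this log and lists the namespaces that InitMetaProcedure just created; it assumes the stock ConnectionFactory/Admin interface and is only a verification sketch, not part of the test shown here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "50360"); // port from the log above

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // InitMetaProcedure created the 'default' and 'hbase' namespaces during startup.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }

Against the mini cluster in this run the loop should print exactly the two namespaces named in the "Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces" line.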
2024-11-22T13:38:43,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@338da4b3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:38:43,507 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request e025332d312f,41717,-1 for getting cluster id 2024-11-22T13:38:43,507 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T13:38:43,509 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0c2487c2-82a8-4a13-92f3-70a0e1d9add3' 2024-11-22T13:38:43,509 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T13:38:43,509 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0c2487c2-82a8-4a13-92f3-70a0e1d9add3" 2024-11-22T13:38:43,509 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@feb32c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:38:43,509 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [e025332d312f,41717,-1] 2024-11-22T13:38:43,509 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T13:38:43,510 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:43,510 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46362, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T13:38:43,511 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e59acdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T13:38:43,512 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T13:38:43,513 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=e025332d312f,45083,1732282722386, seqNum=-1] 2024-11-22T13:38:43,513 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T13:38:43,514 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60250, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T13:38:43,515 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=e025332d312f,41717,1732282722226 2024-11-22T13:38:43,516 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T13:38:43,519 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T13:38:43,519 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T13:38:43,521 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/test.com,8080,1, archiveDir=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/oldWALs, maxLogs=32 2024-11-22T13:38:43,521 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732282723521 2024-11-22T13:38:43,526 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/test.com,8080,1/test.com%2C8080%2C1.1732282723521 2024-11-22T13:38:43,528 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44053:44053),(127.0.0.1/127.0.0.1:37699:37699)] 2024-11-22T13:38:43,532 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732282723532 2024-11-22T13:38:43,539 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,539 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,540 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,540 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,540 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,540 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/test.com,8080,1/test.com%2C8080%2C1.1732282723521 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/test.com,8080,1/test.com%2C8080%2C1.1732282723532 2024-11-22T13:38:43,542 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37699:37699),(127.0.0.1/127.0.0.1:44053:44053)] 2024-11-22T13:38:43,542 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/test.com,8080,1/test.com%2C8080%2C1.1732282723521 is not closed yet, will try archiving it next time 2024-11-22T13:38:43,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741835_1011 (size=93) 2024-11-22T13:38:43,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741835_1011 (size=93) 2024-11-22T13:38:43,543 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,543 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,543 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,543 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,543 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,543 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/WALs/test.com,8080,1/test.com%2C8080%2C1.1732282723521 to hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/oldWALs/test.com%2C8080%2C1.1732282723521 2024-11-22T13:38:43,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741836_1012 (size=93) 2024-11-22T13:38:43,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741836_1012 (size=93) 2024-11-22T13:38:43,547 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/oldWALs 2024-11-22T13:38:43,547 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732282723532) 2024-11-22T13:38:43,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T13:38:43,548 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T13:38:43,548 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:38:43,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:43,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:43,548 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T13:38:43,548 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T13:38:43,548 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1457825785, stopped=false 2024-11-22T13:38:43,548 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=e025332d312f,41717,1732282722226 2024-11-22T13:38:43,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:38:43,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T13:38:43,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:43,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:43,565 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:38:43,565 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
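The call stacks above come from AbstractTestLogRolling.tearDown shutting the mini cluster down through HBaseTestingUtil. A bare-bones JUnit 4 skeleton of that lifecycle follows; shutdownMiniCluster appears verbatim in the stack trace, while startMiniCluster and the placeholder test body are assumptions for illustration.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class LogRollingLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Assumed entry point: spins up ZooKeeper, HDFS and a single master + region server,
        // producing the startup sequence seen earlier in this log.
        util.startMiniCluster();
      }

      @After
      public void tearDown() throws Exception {
        // Matches the stack trace above: HBaseTestingUtil.shutdownMiniCluster -> shutdownMiniHBaseCluster.
        util.shutdownMiniCluster();
      }

      @Test
      public void walCanBeRolledAndArchived() throws Exception {
        // Placeholder: the real AbstractTestLogRolling subclasses exercise WAL rolling here.
      }
    }

Everything after this point in the log is the @After path: the async connections are closed, the master is asked to shut down, and the /hbase/running znode is deleted to tell every server to stop.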
2024-11-22T13:38:43,565 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:38:43,565 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:43,565 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:38:43,565 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T13:38:43,565 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'e025332d312f,45083,1732282722386' ***** 2024-11-22T13:38:43,565 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T13:38:43,566 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(959): stopping server e025332d312f,45083,1732282722386 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;e025332d312f:45083. 2024-11-22T13:38:43,566 DEBUG [RS:0;e025332d312f:45083 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T13:38:43,566 DEBUG [RS:0;e025332d312f:45083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
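Shutdown here is signalled by deleting the /hbase/running znode: each ZKWatcher sees NodeDeleted and then re-arms a watch on the now-absent node ("Set watcher on znode that does not yet exist"). A small sketch of that exists-watch pattern with the plain ZooKeeper client is below; it is not the HBase ZKWatcher/ZKUtil code, and the latch-based wait is just one way to react to the event.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher.Event.EventType;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        String quorum = "127.0.0.1:50360";
        String runningZnode = "/hbase/running";
        CountDownLatch shutdownSeen = new CountDownLatch(1);

        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> {
          if (event.getType() == EventType.NodeDeleted && runningZnode.equals(event.getPath())) {
            shutdownSeen.countDown(); // cluster shutdown requested
          }
        });

        // exists() works whether or not the node is present, so the watch can be
        // (re)armed even after the node has been deleted, as the ZKUtil lines above show.
        zk.exists(runningZnode, true);
        shutdownSeen.await();
        System.out.println("cluster shutdown observed");
        zk.close();
      }
    }

Using a presence znode this way lets every server learn about shutdown from a single delete, without the master having to contact each one over RPC first.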
2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T13:38:43,566 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T13:38:43,566 DEBUG [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T13:38:43,566 DEBUG [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T13:38:43,567 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T13:38:43,567 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T13:38:43,567 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T13:38:43,567 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T13:38:43,567 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T13:38:43,567 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T13:38:43,581 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740/.tmp/ns/2f2279fe8c2344ca9cf405be58b31e6a is 43, key is default/ns:d/1732282723478/Put/seqid=0 2024-11-22T13:38:43,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741837_1013 (size=5153) 2024-11-22T13:38:43,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741837_1013 (size=5153) 2024-11-22T13:38:43,586 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740/.tmp/ns/2f2279fe8c2344ca9cf405be58b31e6a 2024-11-22T13:38:43,591 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740/.tmp/ns/2f2279fe8c2344ca9cf405be58b31e6a as hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740/ns/2f2279fe8c2344ca9cf405be58b31e6a 2024-11-22T13:38:43,596 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740/ns/2f2279fe8c2344ca9cf405be58b31e6a, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T13:38:43,597 INFO 
[RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false 2024-11-22T13:38:43,601 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T13:38:43,602 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T13:38:43,602 INFO [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T13:38:43,602 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732282723567Running coprocessor pre-close hooks at 1732282723567Disabling compacts and flushes for region at 1732282723567Disabling writes for close at 1732282723567Obtaining lock to block concurrent updates at 1732282723567Preparing flush snapshotting stores in 1588230740 at 1732282723567Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732282723567Flushing stores of hbase:meta,,1.1588230740 at 1732282723568 (+1 ms)Flushing 1588230740/ns: creating writer at 1732282723568Flushing 1588230740/ns: appending metadata at 1732282723581 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1732282723581Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d77e788: reopening flushed file at 1732282723590 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 30ms, sequenceid=6, compaction requested=false at 1732282723597 (+7 ms)Writing region close event to WAL at 1732282723598 (+1 ms)Running coprocessor post-close hooks at 1732282723602 (+4 ms)Closed at 1732282723602 2024-11-22T13:38:43,602 DEBUG [RS_CLOSE_META-regionserver/e025332d312f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T13:38:43,767 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(976): stopping server e025332d312f,45083,1732282722386; all regions closed. 
2024-11-22T13:38:43,768 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,768 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,768 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,768 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,768 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741834_1010 (size=1152) 2024-11-22T13:38:43,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741834_1010 (size=1152) 2024-11-22T13:38:43,777 DEBUG [RS:0;e025332d312f:45083 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/oldWALs 2024-11-22T13:38:43,777 INFO [RS:0;e025332d312f:45083 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C45083%2C1732282722386.meta:.meta(num 1732282723399) 2024-11-22T13:38:43,778 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,779 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,779 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,779 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,779 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:43,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741833_1009 (size=93) 2024-11-22T13:38:43,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741833_1009 (size=93) 2024-11-22T13:38:43,783 DEBUG [RS:0;e025332d312f:45083 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/oldWALs 2024-11-22T13:38:43,783 INFO [RS:0;e025332d312f:45083 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog e025332d312f%2C45083%2C1732282722386:(num 1732282722895) 2024-11-22T13:38:43,783 DEBUG [RS:0;e025332d312f:45083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T13:38:43,783 INFO [RS:0;e025332d312f:45083 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T13:38:43,783 INFO [RS:0;e025332d312f:45083 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:38:43,783 INFO [RS:0;e025332d312f:45083 {}] hbase.ChoreService(370): Chore service for: regionserver/e025332d312f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T13:38:43,783 INFO [RS:0;e025332d312f:45083 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:38:43,783 INFO [regionserver/e025332d312f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T13:38:43,783 INFO [RS:0;e025332d312f:45083 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45083 2024-11-22T13:38:43,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e025332d312f,45083,1732282722386 2024-11-22T13:38:43,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T13:38:43,796 INFO [RS:0;e025332d312f:45083 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:38:43,807 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e025332d312f,45083,1732282722386] 2024-11-22T13:38:43,817 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/e025332d312f,45083,1732282722386 already deleted, retry=false 2024-11-22T13:38:43,817 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; e025332d312f,45083,1732282722386 expired; onlineServers=0 2024-11-22T13:38:43,817 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'e025332d312f,41717,1732282722226' ***** 2024-11-22T13:38:43,817 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T13:38:43,818 INFO [M:0;e025332d312f:41717 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T13:38:43,818 INFO [M:0;e025332d312f:41717 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T13:38:43,818 DEBUG [M:0;e025332d312f:41717 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T13:38:43,818 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T13:38:43,818 DEBUG [M:0;e025332d312f:41717 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T13:38:43,818 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282722717 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.small.0-1732282722717,5,FailOnTimeoutGroup] 2024-11-22T13:38:43,818 DEBUG [master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282722716 {}] cleaner.HFileCleaner(306): Exit Thread[master/e025332d312f:0:becomeActiveMaster-HFileCleaner.large.0-1732282722716,5,FailOnTimeoutGroup] 2024-11-22T13:38:43,818 INFO [M:0;e025332d312f:41717 {}] hbase.ChoreService(370): Chore service for: master/e025332d312f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T13:38:43,819 INFO [M:0;e025332d312f:41717 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T13:38:43,819 DEBUG [M:0;e025332d312f:41717 {}] master.HMaster(1795): Stopping service threads 2024-11-22T13:38:43,819 INFO [M:0;e025332d312f:41717 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T13:38:43,819 INFO [M:0;e025332d312f:41717 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T13:38:43,819 INFO [M:0;e025332d312f:41717 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T13:38:43,819 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T13:38:43,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,41959,1732282540115/e025332d312f%2C41959%2C1732282540115.meta.1732282541217.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T13:38:43,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46035/user/jenkins/test-data/b36b0de8-471a-cbc5-7f01-b4ae623188f7/WALs/e025332d312f,40403,1732282541385/e025332d312f%2C40403%2C1732282541385.1732282541623 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T13:38:43,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T13:38:43,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T13:38:43,904 DEBUG [M:0;e025332d312f:41717 {}] zookeeper.ZKUtil(347): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T13:38:43,905 WARN [M:0;e025332d312f:41717 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T13:38:43,906 INFO [M:0;e025332d312f:41717 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/.lastflushedseqids 2024-11-22T13:38:43,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:38:43,907 INFO [RS:0;e025332d312f:45083 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:38:43,907 INFO [RS:0;e025332d312f:45083 {}] regionserver.HRegionServer(1031): Exiting; stopping=e025332d312f,45083,1732282722386; zookeeper connection closed. 2024-11-22T13:38:43,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45083-0x10162c4a34b0001, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:38:43,908 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@34cd48c9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@34cd48c9 2024-11-22T13:38:43,909 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T13:38:43,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741838_1014 (size=99) 2024-11-22T13:38:43,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741838_1014 (size=99) 2024-11-22T13:38:43,917 INFO [M:0;e025332d312f:41717 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T13:38:43,917 INFO [M:0;e025332d312f:41717 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T13:38:43,917 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T13:38:43,917 INFO [M:0;e025332d312f:41717 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:43,917 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T13:38:43,917 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T13:38:43,917 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:43,917 INFO [M:0;e025332d312f:41717 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T13:38:43,933 DEBUG [M:0;e025332d312f:41717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/965a5efd13a2437494a75d49552231c8 is 82, key is hbase:meta,,1/info:regioninfo/1732282723429/Put/seqid=0 2024-11-22T13:38:43,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741839_1015 (size=5672) 2024-11-22T13:38:43,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741839_1015 (size=5672) 2024-11-22T13:38:43,937 INFO [M:0;e025332d312f:41717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/965a5efd13a2437494a75d49552231c8 2024-11-22T13:38:43,956 DEBUG [M:0;e025332d312f:41717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a7653bd71bbc41e082434f0a31038fc6 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732282723481/Put/seqid=0 2024-11-22T13:38:43,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741840_1016 (size=5275) 2024-11-22T13:38:43,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741840_1016 (size=5275) 2024-11-22T13:38:43,961 INFO [M:0;e025332d312f:41717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a7653bd71bbc41e082434f0a31038fc6 2024-11-22T13:38:43,979 DEBUG [M:0;e025332d312f:41717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/934d350b3b874920bb77220ffafb1df4 is 69, key is e025332d312f,45083,1732282722386/rs:state/1732282722742/Put/seqid=0 2024-11-22T13:38:43,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741841_1017 (size=5156) 2024-11-22T13:38:43,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741841_1017 (size=5156) 2024-11-22T13:38:43,984 INFO [M:0;e025332d312f:41717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 
B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/934d350b3b874920bb77220ffafb1df4 2024-11-22T13:38:44,003 DEBUG [M:0;e025332d312f:41717 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90d237a3875c4236829bf9a5aa6dde87 is 52, key is load_balancer_on/state:d/1732282723518/Put/seqid=0 2024-11-22T13:38:44,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741842_1018 (size=5056) 2024-11-22T13:38:44,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741842_1018 (size=5056) 2024-11-22T13:38:44,007 INFO [M:0;e025332d312f:41717 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90d237a3875c4236829bf9a5aa6dde87 2024-11-22T13:38:44,012 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/965a5efd13a2437494a75d49552231c8 as hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/965a5efd13a2437494a75d49552231c8 2024-11-22T13:38:44,016 INFO [M:0;e025332d312f:41717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/965a5efd13a2437494a75d49552231c8, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T13:38:44,017 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a7653bd71bbc41e082434f0a31038fc6 as hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a7653bd71bbc41e082434f0a31038fc6 2024-11-22T13:38:44,020 INFO [M:0;e025332d312f:41717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a7653bd71bbc41e082434f0a31038fc6, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T13:38:44,021 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/934d350b3b874920bb77220ffafb1df4 as hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/934d350b3b874920bb77220ffafb1df4 2024-11-22T13:38:44,025 INFO [M:0;e025332d312f:41717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/934d350b3b874920bb77220ffafb1df4, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T13:38:44,026 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/90d237a3875c4236829bf9a5aa6dde87 as hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/90d237a3875c4236829bf9a5aa6dde87 2024-11-22T13:38:44,030 INFO [M:0;e025332d312f:41717 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40981/user/jenkins/test-data/9a207765-cbd8-84f4-b27c-dd7be6b9bae7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/90d237a3875c4236829bf9a5aa6dde87, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T13:38:44,031 INFO [M:0;e025332d312f:41717 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false 2024-11-22T13:38:44,032 INFO [M:0;e025332d312f:41717 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T13:38:44,032 DEBUG [M:0;e025332d312f:41717 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732282723917Disabling compacts and flushes for region at 1732282723917Disabling writes for close at 1732282723917Obtaining lock to block concurrent updates at 1732282723917Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732282723917Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732282723918 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732282723918Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732282723919 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732282723932 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732282723932Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732282723942 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732282723956 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732282723956Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732282723965 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732282723978 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732282723978Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732282723987 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732282724002 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732282724002Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32d1bf5d: reopening flushed file at 1732282724011 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79ecdddf: reopening flushed file at 1732282724016 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6995e680: reopening flushed file at 1732282724020 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43e00f18: reopening flushed file at 1732282724025 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false at 1732282724031 (+6 ms)Writing region close event to WAL at 1732282724032 (+1 ms)Closed at 1732282724032 2024-11-22T13:38:44,033 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:44,033 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:44,033 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:44,033 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:44,033 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T13:38:44,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40549 is added to blk_1073741830_1006 (size=10311) 2024-11-22T13:38:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45201 is added to blk_1073741830_1006 (size=10311) 2024-11-22T13:38:44,035 INFO [M:0;e025332d312f:41717 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T13:38:44,035 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T13:38:44,035 INFO [M:0;e025332d312f:41717 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41717 2024-11-22T13:38:44,035 INFO [M:0;e025332d312f:41717 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T13:38:44,160 INFO [M:0;e025332d312f:41717 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T13:38:44,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:38:44,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41717-0x10162c4a34b0000, quorum=127.0.0.1:50360, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T13:38:44,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@393b35ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:38:44,165 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51c0efa3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:38:44,165 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:38:44,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e54a8c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:38:44,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c78c12c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/hadoop.log.dir/,STOPPED} 2024-11-22T13:38:44,167 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:38:44,167 WARN [BP-277395602-172.17.0.2-1732282719861 heartbeating to localhost/127.0.0.1:40981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:38:44,167 WARN [BP-277395602-172.17.0.2-1732282719861 heartbeating to localhost/127.0.0.1:40981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-277395602-172.17.0.2-1732282719861 (Datanode Uuid dd626bab-b615-4e57-ba0b-a9baec66e793) service to localhost/127.0.0.1:40981 2024-11-22T13:38:44,167 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:38:44,169 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/data/data3/current/BP-277395602-172.17.0.2-1732282719861 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:38:44,169 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/data/data4/current/BP-277395602-172.17.0.2-1732282719861 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:38:44,169 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:38:44,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11ec225b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T13:38:44,172 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6df20715{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:38:44,172 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:38:44,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e0b3b7c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:38:44,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59532081{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/hadoop.log.dir/,STOPPED} 2024-11-22T13:38:44,173 WARN [BP-277395602-172.17.0.2-1732282719861 heartbeating to localhost/127.0.0.1:40981 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T13:38:44,173 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T13:38:44,173 WARN [BP-277395602-172.17.0.2-1732282719861 heartbeating to localhost/127.0.0.1:40981 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-277395602-172.17.0.2-1732282719861 (Datanode Uuid 3722ec9a-a445-49f8-a5e2-364857838e95) service to localhost/127.0.0.1:40981 2024-11-22T13:38:44,173 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T13:38:44,173 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/data/data1/current/BP-277395602-172.17.0.2-1732282719861 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:38:44,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/cluster_c5ad3e5b-a120-3835-9ea9-c30e06580445/data/data2/current/BP-277395602-172.17.0.2-1732282719861 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T13:38:44,174 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T13:38:44,178 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@cbcac8c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T13:38:44,179 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4cb35637{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T13:38:44,179 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T13:38:44,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e4376a4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T13:38:44,179 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2601a9a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/37596f84-7a73-e3af-bdae-62114dc230e3/hadoop.log.dir/,STOPPED} 2024-11-22T13:38:44,185 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T13:38:44,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T13:38:44,209 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=272 (was 233) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40981 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:40981 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:40981 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40981 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:40981 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40981 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=544 (was 521) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=70 (was 50) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2308 (was 2316)